Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll | 7
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir | 55
-rw-r--r--  llvm/test/CodeGen/AArch64/build-vector-two-dup.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll | 22
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll | 9
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll | 7
-rw-r--r--  llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll | 332
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll | 13
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm.sincospi.ll | 308
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll | 39
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir | 133
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll | 85
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll | 39
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll | 80
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll | 40
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll | 30
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll | 72
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll | 75
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll | 80
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll | 40
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve2-xar.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/zext-to-tbl.ll | 92
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll | 38
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll | 420
-rw-r--r--  llvm/test/CodeGen/AMDGPU/and.ll | 100
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16-math.ll | 5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfe-patterns.ll | 84
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfi_nested.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfm.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bitreverse.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/build_vector.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/cc-entry.ll | 69
-rw-r--r--  llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll | 32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll | 113
-rw-r--r--  llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll | 198
-rw-r--r--  llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll | 56
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll | 89
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fabs.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fdiv.ll | 84
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin_legacy.ll | 15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fnearbyint.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-fabs.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp_to_sint.ll | 85
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp_to_uint.ll | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fshl.ll | 19
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll | 30
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp10.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp2.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log.ll | 37
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log10.ll | 37
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log2.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 2050
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll | 2106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll | 2106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll | 2398
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad_uint24.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/max.ll | 86
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memmove-var-size.ll | 408
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul_int24.ll | 129
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll | 106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/nofpclass-call.ll | 191
-rw-r--r--  llvm/test/CodeGen/AMDGPU/or.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll | 15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shl.v2i16.ll | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sign_extend.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll | 63
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sub.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sub.v2i16.ll | 41
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv.ll | 62
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll | 28
-rw-r--r--  llvm/test/CodeGen/AMDGPU/while-break.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/xor.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll | 15
-rw-r--r--  llvm/test/CodeGen/ARM/llvm.sincospi.ll | 249
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll | 59
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll | 49
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll | 30
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll | 145
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll | 64
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll | 46
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll | 101
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll | 121
-rw-r--r--  llvm/test/CodeGen/Hexagon/instrprof-custom.ll | 7
-rw-r--r--  llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll | 48
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincos.ll | 72
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincospi.ll | 21
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll | 25
-rw-r--r--  llvm/test/CodeGen/PowerPC/milicode64.ll | 2
-rw-r--r--  llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll | 4
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll | 18
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll | 34
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll | 91
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-float32regloops.ll | 211
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-increment.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll | 90
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll | 52
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-shuffle.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vld4.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vst4.ll | 14
-rw-r--r--  llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/3addr-16bit.ll | 48
-rw-r--r--  llvm/test/CodeGen/X86/atomic-rm-bit-test.ll | 22
-rw-r--r--  llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-sections-list.ll | 62
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/bitcast-vector-bool.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/fold-loop-of-urem.ll | 81
-rw-r--r--  llvm/test/CodeGen/X86/freeze-binary.ll | 26
-rw-r--r--  llvm/test/CodeGen/X86/i128-mul.ll | 178
-rw-r--r--  llvm/test/CodeGen/X86/icmp-abs-C.ll | 22
-rw-r--r--  llvm/test/CodeGen/X86/llvm.sincospi.ll | 233
-rw-r--r--  llvm/test/CodeGen/X86/masked_gather_scatter.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/midpoint-int.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/mmx-arith.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i16.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i32.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i8.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/optimize-max-0.ll | 211
-rw-r--r--  llvm/test/CodeGen/X86/parity.ll | 30
-rw-r--r--  llvm/test/CodeGen/X86/pr166744.ll | 14
-rw-r--r--  llvm/test/CodeGen/X86/rotate-extract.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/smul_fix.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/sshl_sat.ll | 40
-rw-r--r--  llvm/test/CodeGen/X86/sshl_sat_vec.ll | 113
-rw-r--r--  llvm/test/CodeGen/X86/stackmap.ll | 9
-rw-r--r--  llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll | 210
-rw-r--r--  llvm/test/CodeGen/X86/twoaddr-lea.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/umul_fix.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/ushl_sat.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/ushl_sat_vec.ll | 111
-rw-r--r--  llvm/test/CodeGen/X86/vector-mulfix-legalize.ll | 34
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll | 160
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll | 6065
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll | 1344
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll | 328
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll | 95
-rw-r--r--  llvm/test/CodeGen/X86/x86-shrink-wrapping.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/xor.ll | 132
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s | 80
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s | 72
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s | 72
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt | 96
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll | 5
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll | 21
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll | 58
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll | 22
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll | 20
-rw-r--r--  llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll | 76
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr50686.ll | 18
-rw-r--r--  llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll | 21
-rw-r--r--  llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll | 20
-rw-r--r--  llvm/test/tools/llc/save-stats.ll | 1
202 files changed, 14044 insertions, 12746 deletions
diff --git a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
index e086ab9..33ea749 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
@@ -52,12 +52,11 @@ define <2 x i64> @test_mul_sub_2x64_2(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3
; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3
; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: sub v0.2d, v1.2d, v0.2d
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
; CHECK-NEXT: ret
%div = sdiv <2 x i64> %a, %b
%mul = mul <2 x i64> %c, %d
diff --git a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
new file mode 100644
index 0000000..8c31e7c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
@@ -0,0 +1,55 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
+--- |
+ define i32 @test01() nounwind {
+ entry:
+ %0 = select i1 true, i32 1, i32 0
+ %1 = and i32 %0, 65535
+ %2 = icmp sgt i32 %1, 0
+ br i1 %2, label %if.then, label %if.end
+
+ if.then: ; preds = %entry
+ ret i32 1
+
+ if.end: ; preds = %entry
+ ret i32 0
+ }
+...
+---
+name: test01
+registers:
+ - { id: 0, class: gpr32 }
+ - { id: 1, class: gpr32common }
+body: |
+ ; CHECK-LABEL: name: test01
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32common = ANDSWri killed [[ANDSWri]], 15, implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.if.then:
+ ; CHECK-NEXT: $w0 = MOVi32imm 1
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.end:
+ ; CHECK-NEXT: $w0 = MOVi32imm 0
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ bb.0.entry:
+ successors: %bb.2.if.end, %bb.1.if.then
+
+ %0 = MOVi32imm 1
+ %1 = ANDWri killed %1, 15
+ $wzr = SUBSWri killed %1, 0, 0, implicit-def $nzcv
+ Bcc 12, %bb.2.if.end, implicit $nzcv
+
+ bb.1.if.then:
+ $w0 = MOVi32imm 1
+ RET_ReallyLR implicit $w0
+
+ bb.2.if.end:
+ $w0 = MOVi32imm 0
+ RET_ReallyLR implicit $w0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
index dbbfbea..f725c19 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
@@ -188,11 +188,11 @@ entry:
define <8 x i8> @test11(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
; CHECK-LABEL: test11:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ld1r { v1.8b }, [x0]
-; CHECK-NEXT: ld1r { v2.8b }, [x1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-NEXT: mov v0.h[3], v1.h[0]
+; CHECK-NEXT: ld1r { v0.8b }, [x0]
+; CHECK-NEXT: ld1r { v1.8b }, [x1]
+; CHECK-NEXT: fmov d2, d0
+; CHECK-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-NEXT: mov v0.h[3], v2.h[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
index 533e831..258eaab 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
@@ -14,13 +14,12 @@ define <vscale x 4 x double> @mull_add(<vscale x 4 x double> %a, <vscale x 4 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmul z7.d, z0.d, z1.d
; CHECK-NEXT: fmul z1.d, z6.d, z1.d
-; CHECK-NEXT: movprfx z3, z7
-; CHECK-NEXT: fmla z3.d, p0/m, z6.d, z2.d
+; CHECK-NEXT: fmad z6.d, p0/m, z2.d, z7.d
; CHECK-NEXT: fnmsb z0.d, p0/m, z2.d, z1.d
; CHECK-NEXT: uzp2 z1.d, z4.d, z5.d
; CHECK-NEXT: uzp1 z2.d, z4.d, z5.d
; CHECK-NEXT: fadd z2.d, z2.d, z0.d
-; CHECK-NEXT: fadd z1.d, z3.d, z1.d
+; CHECK-NEXT: fadd z1.d, z6.d, z1.d
; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
; CHECK-NEXT: ret
@@ -225,17 +224,14 @@ define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale
; CHECK-NEXT: fmul z1.d, z25.d, z1.d
; CHECK-NEXT: fmul z3.d, z4.d, z24.d
; CHECK-NEXT: fmul z24.d, z5.d, z24.d
-; CHECK-NEXT: movprfx z7, z26
-; CHECK-NEXT: fmla z7.d, p0/m, z25.d, z2.d
+; CHECK-NEXT: fmad z25.d, p0/m, z2.d, z26.d
; CHECK-NEXT: fnmsb z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fmla z1.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: movprfx z2, z24
-; CHECK-NEXT: fnmls z2.d, p0/m, z4.d, z6.d
-; CHECK-NEXT: fadd z2.d, z0.d, z2.d
-; CHECK-NEXT: fadd z1.d, z7.d, z1.d
-; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
-; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
+; CHECK-NEXT: fmla z3.d, p0/m, z6.d, z5.d
+; CHECK-NEXT: fnmsb z4.d, p0/m, z6.d, z24.d
+; CHECK-NEXT: fadd z1.d, z0.d, z4.d
+; CHECK-NEXT: fadd z2.d, z25.d, z3.d
+; CHECK-NEXT: zip1 z0.d, z1.d, z2.d
+; CHECK-NEXT: zip2 z1.d, z1.d, z2.d
; CHECK-NEXT: ret
entry:
%strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
index 1eed972..b68c009 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
@@ -200,12 +200,10 @@ define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale
; CHECK-NEXT: fmul z3.d, z2.d, z25.d
; CHECK-NEXT: fmul z25.d, z24.d, z25.d
; CHECK-NEXT: fmla z3.d, p0/m, z24.d, z0.d
-; CHECK-NEXT: movprfx z24, z25
-; CHECK-NEXT: fmla z24.d, p0/m, z26.d, z1.d
-; CHECK-NEXT: movprfx z6, z24
-; CHECK-NEXT: fmla z6.d, p0/m, z5.d, z4.d
+; CHECK-NEXT: fmla z25.d, p0/m, z26.d, z1.d
+; CHECK-NEXT: fmla z25.d, p0/m, z5.d, z4.d
; CHECK-NEXT: fmla z3.d, p0/m, z26.d, z4.d
-; CHECK-NEXT: fnmsb z2.d, p0/m, z0.d, z6.d
+; CHECK-NEXT: fnmsb z2.d, p0/m, z0.d, z25.d
; CHECK-NEXT: fmsb z1.d, p0/m, z5.d, z3.d
; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
index c2fc959..583391c 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
@@ -17,11 +17,10 @@ define <vscale x 4 x half> @complex_add_v4f16(<vscale x 4 x half> %a, <vscale x
; CHECK-NEXT: uunpklo z3.d, z3.s
; CHECK-NEXT: uunpklo z1.d, z1.s
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT: zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z2.s
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: zip2 z1.d, z0.d, z2.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z2.d
+; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%a.deinterleaved = tail call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.vector.deinterleave2.nxv4f16(<vscale x 4 x half> %a)
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
index 061fd07..00b0095 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
@@ -18,11 +18,10 @@ define <vscale x 4 x i16> @complex_mul_v4i16(<vscale x 4 x i16> %a, <vscale x 4
; CHECK-NEXT: uzp2 z1.d, z1.d, z3.d
; CHECK-NEXT: mul z5.d, z2.d, z0.d
; CHECK-NEXT: mul z2.d, z2.d, z4.d
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: mla z3.d, p0/m, z1.d, z4.d
+; CHECK-NEXT: mad z4.d, p0/m, z1.d, z5.d
; CHECK-NEXT: msb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: zip2 z1.d, z0.d, z3.d
-; CHECK-NEXT: zip1 z0.d, z0.d, z3.d
+; CHECK-NEXT: zip2 z1.d, z0.d, z4.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z4.d
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
index 33c5ba7..8297fa2 100644
--- a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
@@ -161,6 +161,338 @@ define i1 @lt64_u16_and_23(i64 %0) {
ret i1 %3
}
+define i1 @test_disjoint(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
; negative test
define i1 @lt3_u8(i8 %0) {
; CHECK-LABEL: lt3_u8:
diff --git a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
index 47fae5a..f0abbaa 100644
--- a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -1148,11 +1148,10 @@ define <vscale x 4 x i64> @fshl_rot_illegal_i64(<vscale x 4 x i64> %a, <vscale x
; CHECK-NEXT: and z3.d, z3.d, #0x3f
; CHECK-NEXT: lslr z4.d, p0/m, z4.d, z0.d
; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: lslr z5.d, p0/m, z5.d, z1.d
; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT: orr z0.d, z4.d, z0.d
-; CHECK-NEXT: orr z1.d, z2.d, z1.d
+; CHECK-NEXT: orr z1.d, z5.d, z1.d
; CHECK-NEXT: ret
%fshl = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
ret <vscale x 4 x i64> %fshl
diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll
new file mode 100644
index 0000000..d074d9a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll
@@ -0,0 +1,13 @@
+; RUN: not llc -mtriple=aarch64-gnu-linux -filetype=null %s 2>&1 | FileCheck %s
+
+; CHECK: error: no libcall available for fsincospi
+define { float, float } @test_sincospi_f32(float %a) {
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { double, double } @test_sincospi_f64(double %a) {
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
index d1d7d92..b386df0 100644
--- a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
@@ -1,268 +1,250 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=arm64-apple-macosx10.9 < %s | FileCheck %s
-define { half, half } @test_sincospi_f16(half %a) {
+define { half, half } @test_sincospi_f16(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldp s1, s0, [sp, #8]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: fcvt h1, s1
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
ret { half, half } %result
}
-define half @test_sincospi_f16_only_use_sin(half %a) {
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16_only_use_sin:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldr s0, [sp, #12]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
%result.0 = extractvalue { half, half } %result, 0
ret half %result.0
}
-define half @test_sincospi_f16_only_use_cos(half %a) {
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16_only_use_cos:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldr s0, [sp, #8]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
%result.1 = extractvalue { half, half } %result, 1
ret half %result.1
}
-define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) {
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f16:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h1, v0.h[1]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #36
-; CHECK-NEXT: add x1, sp, #32
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov h1, v0[1]
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
+; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: fcvt s0, h1
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #20
+; CHECK-NEXT: add x1, sp, #16
; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #36
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: mov h0, v0[2]
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
; CHECK-NEXT: add x0, sp, #44
; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: add x1, sp, #56
-; CHECK-NEXT: mov h0, v0.h[3]
+; CHECK-NEXT: mov h0, v0[3]
; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s2, s0, [sp, #32]
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ldp s3, s1, [sp, #24]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s2, s0, [sp, #24]
+; CHECK-NEXT: ldp s3, s1, [sp, #16]
+; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h4, s0
; CHECK-NEXT: fcvt h2, s2
; CHECK-NEXT: fcvt h0, s1
; CHECK-NEXT: fcvt h1, s3
-; CHECK-NEXT: ldp s5, s3, [sp, #40]
+; CHECK-NEXT: ldp s5, s3, [sp, #32]
; CHECK-NEXT: fcvt h3, s3
-; CHECK-NEXT: mov v0.h[1], v4.h[0]
+; CHECK-NEXT: mov.h v0[1], v4[0]
; CHECK-NEXT: fcvt h4, s5
-; CHECK-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-NEXT: ldp s5, s2, [sp, #56]
-; CHECK-NEXT: mov v0.h[2], v3.h[0]
+; CHECK-NEXT: mov.h v1[1], v2[0]
+; CHECK-NEXT: ldp s5, s2, [sp, #40]
+; CHECK-NEXT: mov.h v0[2], v3[0]
; CHECK-NEXT: fcvt h2, s2
; CHECK-NEXT: fcvt h3, s5
-; CHECK-NEXT: mov v1.h[2], v4.h[0]
-; CHECK-NEXT: mov v0.h[3], v2.h[0]
-; CHECK-NEXT: mov v1.h[3], v3.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT: mov.h v1[2], v4[0]
+; CHECK-NEXT: mov.h v0[3], v2[0]
+; CHECK-NEXT: mov.h v1[3], v3[0]
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
ret { <2 x half>, <2 x half> } %result
}
-define { float, float } @test_sincospi_f32(float %a) {
+define { float, float } @test_sincospi_f32(float %a) #0 {
; CHECK-LABEL: test_sincospi_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldp s1, s0, [sp, #8]
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { float, float } @llvm.sincospi.f32(float %a)
ret { float, float } %result
}
-define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) {
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
; CHECK-LABEL: test_sincospi_v3f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w30, -48
-; CHECK-NEXT: add x0, sp, #20
-; CHECK-NEXT: add x1, sp, #16
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #28
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #36
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: add x19, sp, #36
+; CHECK-NEXT: add x20, sp, #32
+; CHECK-NEXT: mov s0, v0[1]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
; CHECK-NEXT: add x0, sp, #44
; CHECK-NEXT: add x1, sp, #40
; CHECK-NEXT: add x21, sp, #44
; CHECK-NEXT: add x22, sp, #40
-; CHECK-NEXT: mov s0, v0.s[2]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s1, s0, [sp, #16]
-; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[1], [x19]
-; CHECK-NEXT: ld1 { v1.s }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[2], [x21]
-; CHECK-NEXT: ld1 { v1.s }[2], [x22]
-; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: mov s0, v0[2]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s1, s0, [sp, #24]
+; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[1], [x19]
+; CHECK-NEXT: ld1.s { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[2], [x21]
+; CHECK-NEXT: ld1.s { v1 }[2], [x22]
+; CHECK-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
ret { <3 x float>, <3 x float> } %result
}
-define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) {
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f32:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #28
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s1, s0, [sp, #40]
-; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[1], [x19]
-; CHECK-NEXT: ld1 { v1.s }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #20
+; CHECK-NEXT: add x1, sp, #16
+; CHECK-NEXT: add x19, sp, #20
+; CHECK-NEXT: add x20, sp, #16
+; CHECK-NEXT: mov s0, v0[1]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s1, s0, [sp, #24]
+; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[1], [x19]
+; CHECK-NEXT: ld1.s { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
ret { <2 x float>, <2 x float> } %result
}
-define { double, double } @test_sincospi_f64(double %a) {
+define { double, double } @test_sincospi_f64(double %a) #0 {
; CHECK-LABEL: test_sincospi_f64:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: add x0, sp, #24
-; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr d0, [sp, #24]
-; CHECK-NEXT: ldr d1, [sp, #8]
-; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #8
+; CHECK-NEXT: mov x1, sp
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldp d1, d0, [sp]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { double, double } @llvm.sincospi.f64(double %a)
ret { double, double } %result
}
-define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) {
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f64:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: add x0, sp, #32
-; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #32
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov d0, v0.d[1]
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr d0, [sp, #56]
-; CHECK-NEXT: ldr d1, [sp, #40]
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.d }[1], [x19]
-; CHECK-NEXT: ld1 { v1.d }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #40
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #24
+; CHECK-NEXT: add x1, sp, #16
+; CHECK-NEXT: add x19, sp, #24
+; CHECK-NEXT: add x20, sp, #16
+; CHECK-NEXT: mov d0, v0[1]
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldp d1, d0, [sp, #32]
+; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.d { v0 }[1], [x19]
+; CHECK-NEXT: ld1.d { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
ret { <2 x double>, <2 x double> } %result
}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
index 3230c9e..b3a7ec9 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -20,20 +20,17 @@ define i32 @sink_load_and_copy(i32 %n) {
; CHECK-NEXT: b.lt .LBB0_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: adrp x8, A
-; CHECK-NEXT: mov w20, w19
-; CHECK-NEXT: ldr w21, [x8, :lo12:A]
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: ldr w20, [x8, :lo12:A]
; CHECK-NEXT: .LBB0_2: // %for.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov w0, w21
+; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w20, w20, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB0_2
-; CHECK-NEXT: b .LBB0_4
-; CHECK-NEXT: .LBB0_3:
-; CHECK-NEXT: mov w20, w19
-; CHECK-NEXT: .LBB0_4: // %for.cond.cleanup
-; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: .LBB0_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
@@ -82,15 +79,12 @@ define i32 @cant_sink_successive_call(i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w21, w21, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB1_2
-; CHECK-NEXT: b .LBB1_4
-; CHECK-NEXT: .LBB1_3:
-; CHECK-NEXT: mov w21, w19
-; CHECK-NEXT: .LBB1_4: // %for.cond.cleanup
+; CHECK-NEXT: .LBB1_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov w0, w21
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -139,15 +133,12 @@ define i32 @cant_sink_successive_store(ptr nocapture readnone %store, i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w21, w21, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB2_2
-; CHECK-NEXT: b .LBB2_4
-; CHECK-NEXT: .LBB2_3:
-; CHECK-NEXT: mov w21, w19
-; CHECK-NEXT: .LBB2_4: // %for.cond.cleanup
+; CHECK-NEXT: .LBB2_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov w0, w21
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir
new file mode 100644
index 0000000..c397953
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir
@@ -0,0 +1,133 @@
+# RUN: llc -mtriple=aarch64--- -run-pass=machine-outliner -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+
+ @x = common global i32 0, align 4
+
+ define i32 @adrp_add() #0 {
+ ret i32 0
+ }
+
+ define i32 @adrp_ldr() #0 {
+ ret i32 0
+ }
+
+ attributes #0 = { noinline noredzone }
+...
+---
+# Check that the main function body doesn't split the ADRP pair
+#
+# CHECK-LABEL: name: adrp_add
+# CHECK-DAG: bb.0:
+# CHECK: BL @OUTLINED_FUNCTION_[[F0:[0-9]+]]
+# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F2:[0-9]+]]
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
+name: adrp_add
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.1:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.2:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.3:
+ liveins: $lr
+ RET undef $lr
+...
+---
+# Check that the main function body doesn't split the ADRP pair
+#
+# CHECK-LABEL: name: adrp_ldr
+# CHECK-DAG: bb.0:
+# CHECK: BL @OUTLINED_FUNCTION_[[F0]]
+# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]]
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
+name: adrp_ldr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.1:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.2:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.3:
+ liveins: $lr
+ RET undef $lr
+
+# Check that no outlined function splits the ADRP pair apart
+#
+# CHECK: OUTLINED_FUNCTION_[[F0]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: RET $lr
+
+# CHECK: OUTLINED_FUNCTION_[[F1]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+# CHECK-NEXT: $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+
+# CHECK: name: OUTLINED_FUNCTION_[[F2]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+# CHECK-NEXT: $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
diff --git a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
index e7e1091..3380842 100644
--- a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
+++ b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
@@ -16,13 +16,12 @@ define i32 @test(ptr %ptr) {
; CHECK-NEXT: mov w9, wzr
; CHECK-NEXT: LBB0_1: ; %.thread
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsr w11, w9, #1
; CHECK-NEXT: sub w10, w9, #1
-; CHECK-NEXT: mov w9, w11
+; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: tbnz w10, #0, LBB0_1
; CHECK-NEXT: ; %bb.2: ; %bb343
; CHECK-NEXT: and w9, w10, #0x1
-; CHECK-NEXT: mov w0, #-1
+; CHECK-NEXT: mov w0, #-1 ; =0xffffffff
; CHECK-NEXT: str w9, [x8]
; CHECK-NEXT: ret
bb:
diff --git a/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll b/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
index b947c94..72f6646 100644
--- a/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
+++ b/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
@@ -151,12 +151,11 @@ define void @dont_coalesce_arg_f16(half %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
; CHECK-NEXT: str h0, [sp, #14] // 2-byte Folded Spill
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr h0, [sp, #14] // 2-byte Folded Reload
; CHECK-NEXT: bl use_f16
@@ -190,12 +189,11 @@ define void @dont_coalesce_arg_f32(float %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
; CHECK-NEXT: bl use_f32
@@ -229,12 +227,11 @@ define void @dont_coalesce_arg_f64(double %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_f64
@@ -273,12 +270,11 @@ define void @dont_coalesce_arg_v1i8(<1 x i8> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v16i8
@@ -313,12 +309,11 @@ define void @dont_coalesce_arg_v1i16(<1 x i16> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v8i16
@@ -353,12 +348,11 @@ define void @dont_coalesce_arg_v1i32(<1 x i32> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v4i32
@@ -393,12 +387,11 @@ define void @dont_coalesce_arg_v1i64(<1 x i64> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v2i64
@@ -433,12 +426,11 @@ define void @dont_coalesce_arg_v1f16(<1 x half> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
; CHECK-NEXT: str h0, [sp, #14] // 2-byte Folded Spill
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr h0, [sp, #14] // 2-byte Folded Reload
; CHECK-NEXT: bl use_v8f16
@@ -513,12 +505,11 @@ define void @dont_coalesce_arg_v1f64(<1 x double> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v2f64
@@ -557,12 +548,11 @@ define void @dont_coalesce_arg_v16i8(<16 x i8> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v16i8
@@ -596,12 +586,11 @@ define void @dont_coalesce_arg_v8i16(<8 x i16> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8i16
@@ -635,12 +624,11 @@ define void @dont_coalesce_arg_v4i32(<4 x i32> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v4i32
@@ -674,12 +662,11 @@ define void @dont_coalesce_arg_v2i64(<2 x i64> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v2i64
@@ -713,12 +700,11 @@ define void @dont_coalesce_arg_v8f16(<8 x half> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8f16
@@ -752,12 +738,11 @@ define void @dont_coalesce_arg_v8bf16(<8 x bfloat> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8bf16
@@ -791,12 +776,11 @@ define void @dont_coalesce_arg_v4f32(<4 x float> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v4f32
@@ -830,12 +814,11 @@ define void @dont_coalesce_arg_v2f64(<2 x double> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v2f64
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index f2163ad..df88f37 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -129,12 +129,11 @@ define <2 x double> @streaming_compatible_with_neon_vectors(<2 x double> %arg) "
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mrs x19, SVCR
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: mrs x19, SVCR
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: tbz w19, #0, .LBB4_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: smstop sm
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
index 6c6a691..52a77cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
@@ -147,15 +147,15 @@ define <2 x float> @extract_v2f32_nxv16f32_2(<vscale x 16 x float> %arg) {
define <4 x i1> @extract_v4i1_nxv32i1_0(<vscale x 32 x i1> %arg) {
; CHECK-LABEL: extract_v4i1_nxv32i1_0:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: umov w8, v1.b[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: umov w9, v1.b[2]
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: umov w8, v1.b[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: umov w8, v1.b[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%ext = call <4 x i1> @llvm.vector.extract.v4i1.nxv32i1(<vscale x 32 x i1> %arg, i64 0)
ret <4 x i1> %ext
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index e103137..7299410 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -248,15 +248,15 @@ define <2 x i1> @extract_v2i1_nxv2i1(<vscale x 2 x i1> %inmask) {
define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
; CHECK-LABEL: extract_v4i1_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.s, p0/z, #1 // =0x1
-; CHECK-NEXT: mov w8, v1.s[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov w9, v1.s[2]
+; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: mov w8, v1.s[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: mov w8, v1.s[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%mask = call <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1> %inmask, i64 0)
ret <4 x i1> %mask
@@ -265,23 +265,23 @@ define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
; CHECK-LABEL: extract_v8i1_nxv8i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, p0/z, #1 // =0x1
-; CHECK-NEXT: umov w8, v1.h[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: umov w9, v1.h[2]
+; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT: umov w8, v0.h[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.b[1], w8
+; CHECK-NEXT: umov w8, v1.h[2]
+; CHECK-NEXT: mov v0.b[2], w8
; CHECK-NEXT: umov w8, v1.h[3]
-; CHECK-NEXT: mov v0.b[2], w9
-; CHECK-NEXT: umov w9, v1.h[4]
; CHECK-NEXT: mov v0.b[3], w8
+; CHECK-NEXT: umov w8, v1.h[4]
+; CHECK-NEXT: mov v0.b[4], w8
; CHECK-NEXT: umov w8, v1.h[5]
-; CHECK-NEXT: mov v0.b[4], w9
-; CHECK-NEXT: umov w9, v1.h[6]
; CHECK-NEXT: mov v0.b[5], w8
+; CHECK-NEXT: umov w8, v1.h[6]
+; CHECK-NEXT: mov v0.b[6], w8
; CHECK-NEXT: umov w8, v1.h[7]
-; CHECK-NEXT: mov v0.b[6], w9
; CHECK-NEXT: mov v0.b[7], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%mask = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1> %inmask, i64 0)
ret <8 x i1> %mask
@@ -292,9 +292,9 @@ define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-LABEL: extract_v16i1_nxv16i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.b[1], v1.b[1]
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: mov v0.b[1], v0.b[1]
; CHECK-NEXT: mov v0.b[2], v1.b[2]
; CHECK-NEXT: mov v0.b[3], v1.b[3]
; CHECK-NEXT: mov v0.b[4], v1.b[4]
@@ -309,6 +309,7 @@ define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-NEXT: mov v0.b[13], v1.b[13]
; CHECK-NEXT: mov v0.b[14], v1.b[14]
; CHECK-NEXT: mov v0.b[15], v1.b[15]
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
%mask = call <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1> %inmask, i64 0)
ret <16 x i1> %mask
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
index 6fbae7e..2dda03e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
@@ -55,10 +55,9 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fadd z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v32f16:
@@ -154,10 +153,9 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fadd z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v16f32:
@@ -253,10 +251,9 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fadd z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v8f64:
@@ -660,10 +657,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h
+; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v32f16:
@@ -771,10 +767,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s
+; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v16f32:
@@ -881,10 +876,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d
+; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v8f64:
@@ -990,10 +984,9 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fmul z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v32f16:
@@ -1089,10 +1082,9 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fmul z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v16f32:
@@ -1188,10 +1180,9 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fmul z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v8f64:
@@ -1827,10 +1818,9 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fsub z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v32f16:
@@ -1926,10 +1916,9 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fsub z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v16f32:
@@ -2025,10 +2014,9 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fsub z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
index e1ec5ee..633b429 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
@@ -64,10 +64,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h
+; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v32f16:
@@ -181,10 +180,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s
+; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v16f32:
@@ -297,10 +295,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d
+; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
index de60dee..90a0499 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
@@ -55,10 +55,9 @@ define void @fmaxnm_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v32f16:
@@ -154,10 +153,9 @@ define void @fmaxnm_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v16f32:
@@ -253,10 +251,9 @@ define void @fmaxnm_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v8f64:
@@ -356,10 +353,9 @@ define void @fminnm_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fminnm z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v32f16:
@@ -455,10 +451,9 @@ define void @fminnm_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fminnm z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v16f32:
@@ -554,10 +549,9 @@ define void @fminnm_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fminnm z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v8f64:
@@ -657,10 +651,9 @@ define void @fmax_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmax z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v32f16:
@@ -756,10 +749,9 @@ define void @fmax_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmax z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v16f32:
@@ -855,10 +847,9 @@ define void @fmax_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmax z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v8f64:
@@ -958,10 +949,9 @@ define void @fmin_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmin z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v32f16:
@@ -1057,10 +1047,9 @@ define void @fmin_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmin z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v16f32:
@@ -1156,10 +1145,9 @@ define void @fmin_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmin z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
index 08a974f..a91b392 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
@@ -155,10 +155,9 @@ define void @sabd_v64i8_v64i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: sabd z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: sabd z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: sabd z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: sabd_v64i8_v64i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
index 58fca3a..7362395 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
@@ -456,10 +456,9 @@ define void @mul_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: mul z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v64i8:
@@ -555,10 +554,9 @@ define void @mul_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: mul z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v32i16:
@@ -654,10 +652,9 @@ define void @mul_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: mul z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v16i32:
@@ -759,10 +756,9 @@ define void @mul_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: mul z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
index 4926684..c563768 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
@@ -55,10 +55,9 @@ define void @smax_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smax z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v64i8:
@@ -154,10 +153,9 @@ define void @smax_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smax z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v32i16:
@@ -253,10 +251,9 @@ define void @smax_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smax z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v16i32:
@@ -360,10 +357,9 @@ define void @smax_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smax z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v8i64:
@@ -463,10 +459,9 @@ define void @smin_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smin z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v64i8:
@@ -562,10 +557,9 @@ define void @smin_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smin z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v32i16:
@@ -661,10 +655,9 @@ define void @smin_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smin z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v16i32:
@@ -768,10 +761,9 @@ define void @smin_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smin z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v8i64:
@@ -871,10 +863,9 @@ define void @umax_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umax z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v64i8:
@@ -970,10 +961,9 @@ define void @umax_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umax z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v32i16:
@@ -1069,10 +1059,9 @@ define void @umax_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umax z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v16i32:
@@ -1176,10 +1165,9 @@ define void @umax_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umax z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v8i64:
@@ -1279,10 +1267,9 @@ define void @umin_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umin z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v64i8:
@@ -1378,10 +1365,9 @@ define void @umin_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umin z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v32i16:
@@ -1477,10 +1463,9 @@ define void @umin_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umin z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v16i32:
@@ -1584,10 +1569,9 @@ define void @umin_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umin z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
index 41cce35..dfbc237 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
@@ -78,10 +78,9 @@ define void @smulh_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smulh z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v64i8:
@@ -209,10 +208,9 @@ define void @smulh_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smulh z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v32i16:
@@ -340,10 +338,9 @@ define void @smulh_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smulh z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v16i32:
@@ -471,10 +468,9 @@ define void @smulh_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smulh z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v8i64:
@@ -607,10 +603,9 @@ define void @umulh_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umulh z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v64i8:
@@ -739,10 +734,9 @@ define void @umulh_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umulh z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v32i16:
@@ -870,10 +864,9 @@ define void @umulh_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umulh z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v16i32:
@@ -1001,10 +994,9 @@ define void @umulh_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umulh z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
index 27be844..14204e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
@@ -616,10 +616,9 @@ define void @srem_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: sdiv z5.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s
+; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: srem_v16i32:
@@ -744,11 +743,10 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_128-NEXT: movprfx z18, z16
; VBITS_GE_128-NEXT: sdiv z18.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; VBITS_GE_128-NEXT: movprfx z1, z2
-; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d
+; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d
; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d
-; VBITS_GE_128-NEXT: stp q0, q1, [x0]
+; VBITS_GE_128-NEXT: stp q0, q2, [x0]
; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32]
; VBITS_GE_128-NEXT: ret
;
@@ -765,10 +763,9 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: sdiv z5.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d
+; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: srem_v8i64:
@@ -1434,10 +1431,9 @@ define void @urem_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: udiv z5.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s
+; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: urem_v16i32:
@@ -1562,11 +1558,10 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_128-NEXT: movprfx z18, z16
; VBITS_GE_128-NEXT: udiv z18.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; VBITS_GE_128-NEXT: movprfx z1, z2
-; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d
+; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d
; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d
-; VBITS_GE_128-NEXT: stp q0, q1, [x0]
+; VBITS_GE_128-NEXT: stp q0, q2, [x0]
; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32]
; VBITS_GE_128-NEXT: ret
;
@@ -1583,10 +1578,9 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: udiv z5.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d
+; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: urem_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
index 0fa8c8f5..a8afa90 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
@@ -57,10 +57,9 @@ define void @ashr_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: asr z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v64i8:
@@ -158,10 +157,9 @@ define void @ashr_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: asr z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v32i16:
@@ -259,10 +257,9 @@ define void @ashr_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: asr z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v16i32:
@@ -360,10 +357,9 @@ define void @ashr_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: asr z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v8i64:
@@ -465,10 +461,9 @@ define void @lshr_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: lsr z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v64i8:
@@ -566,10 +561,9 @@ define void @lshr_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: lsr z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v32i16:
@@ -667,10 +661,9 @@ define void @lshr_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: lsr z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v16i32:
@@ -768,10 +761,9 @@ define void @lshr_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: lsr z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v8i64:
@@ -871,10 +863,9 @@ define void @shl_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: lsl z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v64i8:
@@ -970,10 +961,9 @@ define void @shl_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: lsl z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v32i16:
@@ -1069,10 +1059,9 @@ define void @shl_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: lsl z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v16i32:
@@ -1168,10 +1157,9 @@ define void @shl_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: lsl z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
index 41e4a38..8e807cd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
@@ -8,15 +8,15 @@ target triple = "aarch64-unknown-linux-gnu"
define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) #0 {
; CHECK-LABEL: reshuffle_v4i1_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.s, p0/z, #1 // =0x1
-; CHECK-NEXT: mov w8, v1.s[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov w9, v1.s[2]
+; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: mov w8, v1.s[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: mov w8, v1.s[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%el0 = extractelement <vscale x 4 x i1> %a, i32 0
%el1 = extractelement <vscale x 4 x i1> %a, i32 1
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index ba4a3a2..bd8f432 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -28,53 +28,53 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK: // %bb.0:
; CHECK-NEXT: tbnz w1, #0, .LBB1_2
; CHECK-NEXT: // %bb.1: // %vector.body
+; CHECK-NEXT: movi v2.2d, #0000000000000000
; CHECK-NEXT: movi v0.2d, #0000000000000000
-; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v1.b[1], v0.b[1]
-; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
+; CHECK-NEXT: umov w8, v2.b[8]
+; CHECK-NEXT: mov v0.b[1], v2.b[1]
+; CHECK-NEXT: ext z3.b, z3.b, z3.b, #16
; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: mov v1.b[2], v0.b[2]
-; CHECK-NEXT: mov v2.b[1], v0.b[9]
-; CHECK-NEXT: mov v1.b[3], v0.b[3]
-; CHECK-NEXT: mov v2.b[2], v0.b[10]
-; CHECK-NEXT: mov v1.b[4], v0.b[4]
-; CHECK-NEXT: mov v2.b[3], v0.b[11]
-; CHECK-NEXT: mov v1.b[5], v0.b[5]
-; CHECK-NEXT: mov v2.b[4], v0.b[12]
-; CHECK-NEXT: mov v1.b[6], v0.b[6]
-; CHECK-NEXT: mov v2.b[5], v0.b[13]
-; CHECK-NEXT: mov v1.b[7], v0.b[7]
-; CHECK-NEXT: mov v2.b[6], v0.b[14]
-; CHECK-NEXT: uunpklo z1.h, z1.b
-; CHECK-NEXT: mov v2.b[7], v0.b[15]
-; CHECK-NEXT: uunpklo z0.h, z3.b
+; CHECK-NEXT: fmov s1, w8
+; CHECK-NEXT: mov v0.b[2], v2.b[2]
+; CHECK-NEXT: mov v1.b[1], v2.b[9]
+; CHECK-NEXT: mov v0.b[3], v2.b[3]
+; CHECK-NEXT: mov v1.b[2], v2.b[10]
+; CHECK-NEXT: mov v0.b[4], v2.b[4]
+; CHECK-NEXT: mov v1.b[3], v2.b[11]
+; CHECK-NEXT: mov v0.b[5], v2.b[5]
+; CHECK-NEXT: mov v1.b[4], v2.b[12]
+; CHECK-NEXT: mov v0.b[6], v2.b[6]
+; CHECK-NEXT: mov v1.b[5], v2.b[13]
+; CHECK-NEXT: mov v0.b[7], v2.b[7]
+; CHECK-NEXT: mov v1.b[6], v2.b[14]
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: mov v1.b[7], v2.b[15]
+; CHECK-NEXT: uunpklo z2.h, z3.b
; CHECK-NEXT: uunpklo z3.h, z4.b
-; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: uunpklo z2.h, z2.b
; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: uunpklo z1.h, z1.b
; CHECK-NEXT: uunpklo z2.s, z2.h
+; CHECK-NEXT: uunpklo z3.s, z3.h
; CHECK-NEXT: lsl z0.s, z0.s, #31
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: lsl z2.s, z2.s, #31
; CHECK-NEXT: lsl z3.s, z3.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: asr z2.s, z2.s, #31
; CHECK-NEXT: asr z3.s, z3.s, #31
-; CHECK-NEXT: lsl z2.s, z2.s, #31
-; CHECK-NEXT: cmpne p3.s, p0/z, z1.s, #0
-; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: cmpne p3.s, p0/z, z0.s, #0
+; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT: movi v2.2d, #0000000000000000
; CHECK-NEXT: cmpne p2.s, p0/z, z3.s, #0
-; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0
-; CHECK-NEXT: st1w { z0.s }, p1, [x0, #2, mul vl]
-; CHECK-NEXT: st1w { z0.s }, p2, [x0, #3, mul vl]
-; CHECK-NEXT: st1w { z0.s }, p3, [x0]
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT: st1w { z2.s }, p1, [x0, #2, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p2, [x0, #3, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p3, [x0]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, #1, mul vl]
; CHECK-NEXT: .LBB1_2: // %exit
; CHECK-NEXT: ret
%broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll b/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
index 124f81e..39fe92a 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
@@ -11,12 +11,12 @@ define void @test_sink_ptrue_into_ptest(i32 %n) {
; CHECK-NEXT: whilelt p0.s, wzr, w0
; CHECK-NEXT: b.pl .LBB0_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: cntw x9
+; CHECK-NEXT: mov w9, wzr
+; CHECK-NEXT: cntw x8
; CHECK-NEXT: .LBB0_2: // %for.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: whilelt p0.s, w8, w0
-; CHECK-NEXT: add w8, w8, w9
+; CHECK-NEXT: whilelt p0.s, w9, w0
+; CHECK-NEXT: add w9, w9, w8
; CHECK-NEXT: b.mi .LBB0_2
; CHECK-NEXT: .LBB0_3: // %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
index f2c882c..20c06f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
@@ -193,9 +193,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v16f16:
@@ -397,9 +396,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v8f32:
@@ -479,9 +477,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v4f64:
@@ -703,9 +700,8 @@ define void @fdiv_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v16f16:
@@ -907,9 +903,8 @@ define void @fdiv_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v8f32:
@@ -989,9 +984,8 @@ define void @fdiv_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v4f64:
@@ -1253,9 +1247,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v16f16:
@@ -1501,9 +1494,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v8f32:
@@ -1595,9 +1587,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v4f64:
@@ -1824,9 +1815,8 @@ define void @fmul_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v16f16:
@@ -2028,9 +2018,8 @@ define void @fmul_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v8f32:
@@ -2110,9 +2099,8 @@ define void @fmul_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v4f64:
@@ -3152,9 +3140,8 @@ define void @fsub_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v16f16:
@@ -3356,9 +3343,8 @@ define void @fsub_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v8f32:
@@ -3438,9 +3424,8 @@ define void @fsub_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
index 680cb4f..dbacd77 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
@@ -208,9 +208,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v16f16:
@@ -526,9 +525,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v8f32:
@@ -642,9 +640,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
index 84aea18..e53d6a9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
@@ -143,9 +143,8 @@ define void @fmaxnm_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v16f16:
@@ -347,9 +346,8 @@ define void @fmaxnm_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v8f32:
@@ -448,9 +446,8 @@ define void @fmaxnm_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v4f64:
@@ -622,9 +619,8 @@ define void @fminnm_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v16f16:
@@ -826,9 +822,8 @@ define void @fminnm_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v8f32:
@@ -927,9 +922,8 @@ define void @fminnm_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v4f64:
@@ -1101,9 +1095,8 @@ define void @fmax_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v16f16:
@@ -1305,9 +1298,8 @@ define void @fmax_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v8f32:
@@ -1406,9 +1398,8 @@ define void @fmax_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v4f64:
@@ -1580,9 +1571,8 @@ define void @fmin_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v16f16:
@@ -1784,9 +1774,8 @@ define void @fmin_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v8f32:
@@ -1885,9 +1874,8 @@ define void @fmin_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
index 4360f3a..02b5469 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
@@ -975,9 +975,8 @@ define void @mul_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v32i8:
@@ -1286,9 +1285,8 @@ define void @mul_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v16i16:
@@ -1467,9 +1465,8 @@ define void @mul_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v8i32:
@@ -1599,9 +1596,8 @@ define void @mul_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
index 1fdcd4f..8e1d61b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
@@ -779,9 +779,8 @@ define void @sdiv_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: sdivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: sdiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: sdiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: sdiv_v8i32:
@@ -886,9 +885,8 @@ define void @sdiv_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: sdivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: sdiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: sdiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: sdiv_v4i64:
@@ -1693,9 +1691,8 @@ define void @udiv_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: udivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: udiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: udiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: udiv_v8i32:
@@ -1800,9 +1797,8 @@ define void @udiv_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: udivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: udiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: udiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: udiv_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
index 1bca7dd..d858d81 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
@@ -179,9 +179,8 @@ define void @smax_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v32i8:
@@ -473,9 +472,8 @@ define void @smax_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v16i16:
@@ -651,9 +649,8 @@ define void @smax_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v8i32:
@@ -771,9 +768,8 @@ define void @smax_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v4i64:
@@ -985,9 +981,8 @@ define void @smin_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v32i8:
@@ -1279,9 +1274,8 @@ define void @smin_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v16i16:
@@ -1457,9 +1451,8 @@ define void @smin_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v8i32:
@@ -1577,9 +1570,8 @@ define void @smin_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v4i64:
@@ -1791,9 +1783,8 @@ define void @umax_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v32i8:
@@ -2085,9 +2076,8 @@ define void @umax_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v16i16:
@@ -2263,9 +2253,8 @@ define void @umax_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v8i32:
@@ -2383,9 +2372,8 @@ define void @umax_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v4i64:
@@ -2597,9 +2585,8 @@ define void @umin_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v32i8:
@@ -2891,9 +2878,8 @@ define void @umin_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v16i16:
@@ -3069,9 +3055,8 @@ define void @umin_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v8i32:
@@ -3189,9 +3174,8 @@ define void @umin_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
index 0c97eed..85b7b4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
@@ -294,9 +294,8 @@ define void @smulh_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v32i8:
@@ -755,9 +754,8 @@ define void @smulh_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v16i16:
@@ -1001,9 +999,8 @@ define void @smulh_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v8i32:
@@ -1159,9 +1156,8 @@ define void @smulh_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v4i64:
@@ -1494,9 +1490,8 @@ define void @umulh_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v32i8:
@@ -1954,9 +1949,8 @@ define void @umulh_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v16i16:
@@ -2200,9 +2194,8 @@ define void @umulh_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v8i32:
@@ -2358,9 +2351,8 @@ define void @umulh_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index 372f6a0..c4b6c0e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -883,9 +883,8 @@ define void @srem_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: sdiv z5.s, p0/m, z5.s, z3.s
; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.s, p0/m, z5.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: srem_v8i32:
@@ -1013,9 +1012,8 @@ define void @srem_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: sdiv z5.d, p0/m, z5.d, z3.d
; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: srem_v4i64:
@@ -1933,9 +1931,8 @@ define void @urem_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: udiv z5.s, p0/m, z5.s, z3.s
; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.s, p0/m, z5.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: urem_v8i32:
@@ -2063,9 +2060,8 @@ define void @urem_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: udiv z5.d, p0/m, z5.d, z3.d
; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: urem_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
index d0f9921..4cf8945 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
@@ -195,9 +195,8 @@ define void @ashr_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v32i8:
@@ -476,9 +475,8 @@ define void @ashr_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v16i16:
@@ -632,9 +630,8 @@ define void @ashr_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v8i32:
@@ -739,9 +736,8 @@ define void @ashr_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v4i64:
@@ -965,9 +961,8 @@ define void @lshr_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v32i8:
@@ -1246,9 +1241,8 @@ define void @lshr_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v16i16:
@@ -1402,9 +1396,8 @@ define void @lshr_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v8i32:
@@ -1509,9 +1502,8 @@ define void @lshr_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v4i64:
@@ -1764,9 +1756,8 @@ define void @shl_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v32i8:
@@ -2014,9 +2005,8 @@ define void @shl_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v16i16:
@@ -2170,9 +2160,8 @@ define void @shl_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v8i32:
@@ -2277,9 +2266,8 @@ define void @shl_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
index 74e5fe7..e9b2f53 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
@@ -954,9 +954,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v16f16:
@@ -1170,9 +1169,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v8f32:
@@ -1258,9 +1256,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index e0e88c4..e78671a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -526,10 +526,9 @@ define void @zip_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: zip1 z5.d, z0.d, z2.d
; CHECK-NEXT: trn2 z1.d, z1.d, z3.d
; CHECK-NEXT: trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: stp q2, q0, [x0]
+; CHECK-NEXT: stp q4, q0, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_v4f64:
@@ -2159,10 +2158,9 @@ define void @zip_vscale2_4(ptr %a, ptr %b) {
; CHECK-NEXT: zip1 z5.d, z0.d, z2.d
; CHECK-NEXT: trn2 z1.d, z1.d, z3.d
; CHECK-NEXT: trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: stp q2, q0, [x0]
+; CHECK-NEXT: stp q4, q0, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_vscale2_4:
diff --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
index 6af2606..0472d5c 100644
--- a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
@@ -36,10 +36,9 @@ define i32 @test(<vscale x 32 x i8> %bin.rdx, <vscale x 32 x i8> %bin.rdx2) {
; CHECK-NEXT: mla z0.s, p0/m, z25.s, z24.s
; CHECK-NEXT: mad z2.s, p0/m, z6.s, z4.s
; CHECK-NEXT: mad z1.s, p0/m, z3.s, z26.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: mla z3.s, p0/m, z28.s, z7.s
+; CHECK-NEXT: mla z5.s, p0/m, z28.s, z7.s
; CHECK-NEXT: add z0.s, z2.s, z0.s
-; CHECK-NEXT: add z1.s, z3.s, z1.s
+; CHECK-NEXT: add z1.s, z5.s, z1.s
; CHECK-NEXT: add z0.s, z1.s, z0.s
; CHECK-NEXT: uaddv d0, p0, z0.s
; CHECK-NEXT: fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/sve2-xar.ll b/llvm/test/CodeGen/AArch64/sve2-xar.ll
index 888e94d..8f6f451 100644
--- a/llvm/test/CodeGen/AArch64/sve2-xar.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-xar.ll
@@ -157,10 +157,9 @@ define <vscale x 2 x i64> @xar_nxv2i64_l_neg1(<vscale x 2 x i64> %x, <vscale x 2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and z3.d, z3.d, #0x3f
; CHECK-NEXT: and z2.d, z2.d, #0x3f
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT: lslr z3.d, p0/m, z3.d, z0.d
; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: orr z0.d, z1.d, z0.d
+; CHECK-NEXT: orr z0.d, z3.d, z0.d
; CHECK-NEXT: ret
%a = xor <vscale x 2 x i64> %x, %y
%b = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %z)
diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index 74a717f..935189d 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -2835,11 +2835,11 @@ define i32 @test_widening_instr_mull(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB24_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v0.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #16
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ld1 { v1.8h }, [x0]
-; CHECK-BE-NEXT: ld1 { v3.8h }, [x8]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #32
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: ld1 { v3.8h }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-BE-NEXT: ushll2 v0.8h, v0.16b, #0
@@ -2847,11 +2847,11 @@ define i32 @test_widening_instr_mull(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: umull2 v5.4s, v3.8h, v0.8h
; CHECK-BE-NEXT: umull v0.4s, v3.4h, v0.4h
; CHECK-BE-NEXT: umull2 v1.4s, v1.8h, v2.8h
-; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x8
+; CHECK-BE-NEXT: st1 { v4.4s }, [x8]
+; CHECK-BE-NEXT: add x8, x8, #32
; CHECK-BE-NEXT: st1 { v5.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v0.4s }, [x10]
-; CHECK-BE-NEXT: st1 { v1.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v0.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v1.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB24_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -2950,26 +2950,26 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB25_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v4.16b }, [x0]
-; CHECK-BE-NEXT: add x9, x1, #48
-; CHECK-BE-NEXT: add x8, x1, #32
-; CHECK-BE-NEXT: ld1 { v18.4s }, [x9]
+; CHECK-BE-NEXT: add x10, x1, #48
; CHECK-BE-NEXT: ld1 { v16.4s }, [x1]
+; CHECK-BE-NEXT: add x9, x1, #32
+; CHECK-BE-NEXT: ld1 { v18.4s }, [x10]
; CHECK-BE-NEXT: add x1, x1, #16
-; CHECK-BE-NEXT: ld1 { v20.4s }, [x8]
+; CHECK-BE-NEXT: ld1 { v20.4s }, [x9]
; CHECK-BE-NEXT: ld1 { v22.4s }, [x1]
-; CHECK-BE-NEXT: add x8, x0, #96
+; CHECK-BE-NEXT: add x9, x0, #96
; CHECK-BE-NEXT: tbl v5.16b, { v4.16b }, v3.16b
; CHECK-BE-NEXT: tbl v6.16b, { v4.16b }, v2.16b
; CHECK-BE-NEXT: tbl v7.16b, { v4.16b }, v1.16b
; CHECK-BE-NEXT: tbl v4.16b, { v4.16b }, v0.16b
; CHECK-BE-NEXT: ext v24.16b, v18.16b, v18.16b, #8
-; CHECK-BE-NEXT: add x9, x0, #32
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ext v25.16b, v20.16b, v20.16b, #8
-; CHECK-BE-NEXT: add x10, x0, #16
+; CHECK-BE-NEXT: add x10, x0, #32
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ext v17.16b, v5.16b, v5.16b, #8
-; CHECK-BE-NEXT: ext v19.16b, v6.16b, v6.16b, #8
; CHECK-BE-NEXT: rev32 v5.8b, v5.8b
+; CHECK-BE-NEXT: ext v19.16b, v6.16b, v6.16b, #8
; CHECK-BE-NEXT: rev32 v21.8b, v7.8b
; CHECK-BE-NEXT: rev32 v23.8b, v4.8b
; CHECK-BE-NEXT: ext v7.16b, v7.16b, v7.16b, #8
@@ -2986,22 +2986,22 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: rev32 v4.8b, v4.8b
; CHECK-BE-NEXT: umull v17.2d, v17.2s, v24.2s
; CHECK-BE-NEXT: umull v19.2d, v19.2s, v25.2s
-; CHECK-BE-NEXT: st1 { v5.2d }, [x8]
+; CHECK-BE-NEXT: st1 { v5.2d }, [x9]
; CHECK-BE-NEXT: umull v5.2d, v6.2s, v20.2s
; CHECK-BE-NEXT: umull v6.2d, v7.2s, v21.2s
-; CHECK-BE-NEXT: add x8, x0, #112
+; CHECK-BE-NEXT: add x9, x0, #112
; CHECK-BE-NEXT: umull v4.2d, v4.2s, v16.2s
-; CHECK-BE-NEXT: st1 { v18.2d }, [x9]
-; CHECK-BE-NEXT: add x9, x0, #80
+; CHECK-BE-NEXT: st1 { v18.2d }, [x10]
+; CHECK-BE-NEXT: add x10, x0, #80
; CHECK-BE-NEXT: st1 { v22.2d }, [x0]
-; CHECK-BE-NEXT: st1 { v17.2d }, [x8]
-; CHECK-BE-NEXT: add x8, x0, #64
-; CHECK-BE-NEXT: st1 { v19.2d }, [x9]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: mov x0, x8
-; CHECK-BE-NEXT: st1 { v5.2d }, [x8]
+; CHECK-BE-NEXT: add x0, x0, #64
+; CHECK-BE-NEXT: st1 { v17.2d }, [x9]
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: add x8, x8, #16
+; CHECK-BE-NEXT: st1 { v19.2d }, [x10]
+; CHECK-BE-NEXT: st1 { v5.2d }, [x0]
; CHECK-BE-NEXT: st1 { v6.2d }, [x9]
-; CHECK-BE-NEXT: st1 { v4.2d }, [x10]
+; CHECK-BE-NEXT: st1 { v4.2d }, [x8]
; CHECK-BE-NEXT: b.ne .LBB25_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -3093,13 +3093,14 @@ define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB26_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v4.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #32
+; CHECK-BE-NEXT: mov x8, x0
+; CHECK-BE-NEXT: add x9, x0, #32
; CHECK-BE-NEXT: ld1 { v16.4s }, [x0]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #16
-; CHECK-BE-NEXT: ld1 { v17.4s }, [x8]
-; CHECK-BE-NEXT: ld1 { v18.4s }, [x9]
-; CHECK-BE-NEXT: ld1 { v19.4s }, [x10]
+; CHECK-BE-NEXT: add x10, x0, #48
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: ld1 { v17.4s }, [x9]
+; CHECK-BE-NEXT: ld1 { v18.4s }, [x10]
+; CHECK-BE-NEXT: ld1 { v19.4s }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: tbl v5.16b, { v4.16b }, v1.16b
; CHECK-BE-NEXT: tbl v6.16b, { v4.16b }, v3.16b
@@ -3113,11 +3114,10 @@ define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: mul v6.4s, v17.4s, v6.4s
; CHECK-BE-NEXT: mul v7.4s, v18.4s, v7.4s
; CHECK-BE-NEXT: mul v4.4s, v19.4s, v4.4s
-; CHECK-BE-NEXT: st1 { v5.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x10
-; CHECK-BE-NEXT: st1 { v6.4s }, [x8]
-; CHECK-BE-NEXT: st1 { v7.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v4.4s }, [x10]
+; CHECK-BE-NEXT: st1 { v5.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v6.4s }, [x9]
+; CHECK-BE-NEXT: st1 { v7.4s }, [x10]
+; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB26_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -3246,11 +3246,11 @@ define i32 @mul_zext_16i8_sext_16i16(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB28_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v0.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #16
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ld1 { v1.8h }, [x0]
-; CHECK-BE-NEXT: ld1 { v3.8h }, [x8]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #32
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: ld1 { v3.8h }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-BE-NEXT: ushll2 v0.8h, v0.16b, #0
@@ -3258,11 +3258,11 @@ define i32 @mul_zext_16i8_sext_16i16(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: smull2 v5.4s, v3.8h, v0.8h
; CHECK-BE-NEXT: smull v0.4s, v3.4h, v0.4h
; CHECK-BE-NEXT: smull2 v1.4s, v1.8h, v2.8h
-; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x8
+; CHECK-BE-NEXT: st1 { v4.4s }, [x8]
+; CHECK-BE-NEXT: add x8, x8, #32
; CHECK-BE-NEXT: st1 { v5.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v0.4s }, [x10]
-; CHECK-BE-NEXT: st1 { v1.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v0.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v1.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB28_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index dd01112..c1e6b4f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -21,14 +21,14 @@ define void @divergent_i1_phi_used_outside_loop(float %val, float %pre.cond.val,
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s6
; GFX10-NEXT: s_mov_b32 s8, exec_lo
+; GFX10-NEXT: s_mov_b32 s9, s5
; GFX10-NEXT: s_add_i32 s6, s6, 1
-; GFX10-NEXT: s_xor_b32 s8, s5, s8
+; GFX10-NEXT: s_xor_b32 s5, s5, s8
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v1, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 s7, s7, exec_lo
-; GFX10-NEXT: s_and_b32 s9, exec_lo, s5
-; GFX10-NEXT: s_mov_b32 s5, s8
-; GFX10-NEXT: s_or_b32 s7, s7, s9
+; GFX10-NEXT: s_and_b32 s8, exec_lo, s9
+; GFX10-NEXT: s_or_b32 s7, s7, s8
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB0_1
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -240,11 +240,11 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB4_6
; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader
-; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr10
; GFX10-NEXT: ; implicit-def: $sgpr11
; GFX10-NEXT: ; implicit-def: $sgpr9
@@ -345,8 +345,8 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-LABEL: divergent_i1_icmp_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: s_branch .LBB5_2
; GFX10-NEXT: .LBB5_1: ; %Flow
@@ -457,8 +457,8 @@ define amdgpu_ps void @divergent_i1_freeze_used_outside_loop(i32 %n, ptr addrspa
; GFX10-LABEL: divergent_i1_freeze_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_mov_b32 s1, exec_lo
-; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: ; implicit-def: $sgpr4
; GFX10-NEXT: ; implicit-def: $sgpr3
; GFX10-NEXT: s_branch .LBB6_2
@@ -534,8 +534,8 @@ exit:
define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) {
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr6
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: ; implicit-def: $sgpr5
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index fd08ab8..484536b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -106,8 +106,8 @@ exit:
define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a) {
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB2_2
; GFX10-NEXT: .LBB2_1: ; %Flow
@@ -180,8 +180,8 @@ exit:
define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX10-LABEL: loop_with_2breaks:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB3_3
; GFX10-NEXT: .LBB3_1: ; %Flow3
@@ -278,8 +278,8 @@ exit:
define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c) {
; GFX10-LABEL: loop_with_3breaks:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB4_4
; GFX10-NEXT: .LBB4_1: ; %Flow5
@@ -404,8 +404,8 @@ exit:
define amdgpu_cs void @loop_with_div_break_with_body(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) {
; GFX10-LABEL: loop_with_div_break_with_body:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr6
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: ; implicit-def: $sgpr5
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
index d13d6a1..69baf61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
@@ -101,8 +101,8 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, i32 %x.size, ptr ad
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: v_mov_b32_e32 v3, 0
-; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr10
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: s_branch .LBB2_3
@@ -197,14 +197,14 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n.
; GFX10-LABEL: nested_loops_temporal_divergence_inner:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: .LBB3_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB3_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: v_mov_b32_e32 v6, s10
@@ -239,13 +239,13 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n.
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6
; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB3_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -288,14 +288,14 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n.
; GFX10-LABEL: nested_loops_temporal_divergence_outer:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: .LBB4_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB4_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: v_mov_b32_e32 v6, s10
@@ -330,13 +330,13 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n.
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6
; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB4_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -379,15 +379,15 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i
; GFX10-LABEL: nested_loops_temporal_divergence_both:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: .LBB5_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB5_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: v_mov_b32_e32 v8, s10
; GFX10-NEXT: v_mov_b32_e32 v9, s11
@@ -421,13 +421,13 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v8, s4, v4, v8
; GFX10-NEXT: v_add_co_ci_u32_e64 v9, s4, v5, v9, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[8:9], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB5_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
index 5240bf4..9aaa9635 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
@@ -547,8 +547,8 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %
;
; NEW_RBS-LABEL: loop_with_2breaks:
; NEW_RBS: ; %bb.0: ; %entry
-; NEW_RBS-NEXT: s_mov_b32 s4, 0
; NEW_RBS-NEXT: s_mov_b32 s0, 0
+; NEW_RBS-NEXT: s_mov_b32 s4, 0
; NEW_RBS-NEXT: ; implicit-def: $sgpr5
; NEW_RBS-NEXT: s_branch .LBB16_3
; NEW_RBS-NEXT: .LBB16_1: ; %Flow3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 54b1554..df77e7d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -40,34 +40,33 @@ define amdgpu_kernel void @udiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: udiv_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s6, v0
+; GFX6-NEXT: s_mul_i32 s6, s6, s5
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_i32:
@@ -138,31 +137,30 @@ define amdgpu_kernel void @urem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: urem_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s6, v0
+; GFX6-NEXT: s_mul_i32 s6, s6, s5
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_i32:
@@ -242,40 +240,39 @@ define amdgpu_kernel void @sdiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: sdiv_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_abs_i32 s8, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
-; GFX6-NEXT: s_sub_i32 s4, 0, s8
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_xor_b32 s1, s2, s3
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_abs_i32 s6, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT: s_sub_i32 s2, 0, s6
+; GFX6-NEXT: s_abs_i32 s7, s4
+; GFX6-NEXT: s_xor_b32 s4, s4, s5
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_ashr_i32 s1, s1, 31
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_abs_i32 s0, s2
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT: v_readfirstlane_b32 s2, v0
-; GFX6-NEXT: s_mul_i32 s2, s2, s8
-; GFX6-NEXT: s_sub_i32 s0, s0, s2
-; GFX6-NEXT: s_sub_i32 s2, s0, s8
+; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v0
+; GFX6-NEXT: s_mul_i32 s5, s5, s6
+; GFX6-NEXT: s_sub_i32 s5, s7, s5
+; GFX6-NEXT: s_sub_i32 s7, s5, s6
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s5, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_cselect_b32 s0, s2, s0
+; GFX6-NEXT: s_cselect_b32 s5, s7, s5
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s5, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s1, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s1, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i32:
@@ -360,36 +357,35 @@ define amdgpu_kernel void @srem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: srem_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_abs_i32 s3, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_abs_i32 s8, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_abs_i32 s5, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_abs_i32 s6, s4
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT: v_readfirstlane_b32 s1, v0
-; GFX6-NEXT: s_mul_i32 s1, s1, s3
-; GFX6-NEXT: s_sub_i32 s1, s8, s1
-; GFX6-NEXT: s_sub_i32 s2, s1, s3
-; GFX6-NEXT: s_cmp_ge_u32 s1, s3
-; GFX6-NEXT: s_cselect_b32 s1, s2, s1
-; GFX6-NEXT: s_sub_i32 s2, s1, s3
-; GFX6-NEXT: s_cmp_ge_u32 s1, s3
-; GFX6-NEXT: s_cselect_b32 s1, s2, s1
-; GFX6-NEXT: s_xor_b32 s1, s1, s0
-; GFX6-NEXT: s_sub_i32 s0, s1, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT: v_readfirstlane_b32 s7, v0
+; GFX6-NEXT: s_mul_i32 s7, s7, s5
+; GFX6-NEXT: s_sub_i32 s6, s6, s7
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s7, s6
+; GFX6-NEXT: s_xor_b32 s5, s5, s4
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32:
@@ -5462,15 +5458,14 @@ define amdgpu_kernel void @udiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: udiv_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_add_i32 s0, s3, 12
-; GFX6-NEXT: s_lshr_b32 s0, s2, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_add_i32 s5, s5, 12
+; GFX6-NEXT: s_lshr_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_i32_pow2_shl_denom:
@@ -5503,16 +5498,15 @@ define amdgpu_kernel void @udiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: udiv_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_lshr_b32 s0, s2, 12
-; GFX6-NEXT: s_lshr_b32 s1, s3, 12
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshr_b32 s4, s4, 12
+; GFX6-NEXT: s_lshr_b32 s5, s5, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_v2i32_pow2k_denom:
@@ -5546,19 +5540,18 @@ define amdgpu_kernel void @udiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out, <
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: v_mov_b32_e32 v0, 0x100101
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_lshr_b32 s0, s2, 12
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s3, v0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0
+; GFX6-NEXT: s_lshr_b32 s4, s4, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s5, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 1, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 11, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_v2i32_mixed_pow2k_denom:
@@ -5855,16 +5848,15 @@ define amdgpu_kernel void @urem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: urem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_lshl_b32 s0, 0x1000, s3
-; GFX6-NEXT: s_add_i32 s0, s0, -1
-; GFX6-NEXT: s_and_b32 s0, s2, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5
+; GFX6-NEXT: s_add_i32 s5, s5, -1
+; GFX6-NEXT: s_and_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_i32_pow2_shl_denom:
@@ -5898,16 +5890,15 @@ define amdgpu_kernel void @urem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: urem_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_and_b32 s0, s2, 0xfff
-; GFX6-NEXT: s_and_b32 s1, s3, 0xfff
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0xfff
+; GFX6-NEXT: s_and_b32 s5, s5, 0xfff
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_v2i32_pow2k_denom:
@@ -6187,41 +6178,40 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: sdiv_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_abs_i32 s8, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
-; GFX6-NEXT: s_sub_i32 s4, 0, s8
-; GFX6-NEXT: s_abs_i32 s9, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5
+; GFX6-NEXT: s_abs_i32 s6, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT: s_sub_i32 s2, 0, s6
+; GFX6-NEXT: s_abs_i32 s7, s4
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s8
-; GFX6-NEXT: s_sub_i32 s0, s9, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s8
+; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_readfirstlane_b32 s8, v0
+; GFX6-NEXT: s_mul_i32 s8, s8, s6
+; GFX6-NEXT: s_sub_i32 s7, s7, s8
+; GFX6-NEXT: s_sub_i32 s8, s7, s6
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s7, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_cselect_b32 s7, s8, s7
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s7, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX6-NEXT: s_xor_b32 s0, s2, s3
+; GFX6-NEXT: s_xor_b32 s4, s4, s5
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_ashr_i32 s0, s0, 31
-; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i32_pow2_shl_denom:
@@ -6279,22 +6269,21 @@ define amdgpu_kernel void @sdiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: sdiv_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_ashr_i32 s1, s3, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_lshr_b32 s1, s1, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: s_add_i32 s1, s3, s1
-; GFX6-NEXT: s_ashr_i32 s0, s0, 12
-; GFX6-NEXT: s_ashr_i32 s1, s1, 12
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_ashr_i32 s7, s5, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_lshr_b32 s7, s7, 20
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: s_add_i32 s5, s5, s7
+; GFX6-NEXT: s_ashr_i32 s4, s4, 12
+; GFX6-NEXT: s_ashr_i32 s5, s5, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v2i32_pow2k_denom:
@@ -6334,22 +6323,21 @@ define amdgpu_kernel void @ssdiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out,
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: v_mov_b32_e32 v0, 0x80080081
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mul_hi_i32 v0, s3, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s3, v0
-; GFX6-NEXT: s_ashr_i32 s0, s0, 12
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_mul_hi_i32 v0, s5, v0
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s5, v0
+; GFX6-NEXT: s_ashr_i32 s4, s4, 12
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 31, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 11, v0
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v0, v1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: ssdiv_v2i32_mixed_pow2k_denom:
@@ -6700,37 +6688,36 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: srem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_abs_i32 s3, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_abs_i32 s8, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s2, 0x1000, s5
+; GFX6-NEXT: s_abs_i32 s5, s2
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_abs_i32 s6, s4
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s8, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_ashr_i32 s1, s2, 31
-; GFX6-NEXT: s_xor_b32 s0, s0, s1
-; GFX6-NEXT: s_sub_i32 s0, s0, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT: v_readfirstlane_b32 s7, v0
+; GFX6-NEXT: s_mul_i32 s7, s7, s5
+; GFX6-NEXT: s_sub_i32 s6, s6, s7
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s7, s6
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: s_xor_b32 s5, s5, s4
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32_pow2_shl_denom:
@@ -6785,24 +6772,23 @@ define amdgpu_kernel void @srem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: srem_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_ashr_i32 s1, s3, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_lshr_b32 s1, s1, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: s_add_i32 s1, s3, s1
-; GFX6-NEXT: s_and_b32 s0, s0, 0xfffff000
-; GFX6-NEXT: s_and_b32 s1, s1, 0xfffff000
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s3, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_ashr_i32 s7, s5, 31
+; GFX6-NEXT: s_add_i32 s6, s4, s6
+; GFX6-NEXT: s_lshr_b32 s7, s7, 20
+; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_add_i32 s6, s5, s7
+; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000
+; GFX6-NEXT: s_sub_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_v2i32_pow2k_denom:
diff --git a/llvm/test/CodeGen/AMDGPU/and.ll b/llvm/test/CodeGen/AMDGPU/and.ll
index 29bfc25..fe9ec8e 100644
--- a/llvm/test/CodeGen/AMDGPU/and.ll
+++ b/llvm/test/CodeGen/AMDGPU/and.ll
@@ -123,27 +123,25 @@ define amdgpu_kernel void @s_and_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; GFX6-LABEL: s_and_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
%and = and i32 %a, %b
store i32 %and, ptr addrspace(1) %out, align 4
@@ -189,36 +187,34 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_0(ptr addrspace(1) %out,
; GFX6-LABEL: s_and_multi_use_constant_i32_0:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX6-NEXT: s_add_i32 s0, s0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
-; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, 0x12d687
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v1, s4
+; GFX6-NEXT: buffer_store_dword v1, off, s[0:3], 0
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_multi_use_constant_i32_0:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v1, s4
+; GFX8-NEXT: buffer_store_dword v1, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
%and = and i32 %a, 1234567
@@ -236,32 +232,30 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_1(ptr addrspace(1) %out,
; GFX6-LABEL: s_and_multi_use_constant_i32_1:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX6-NEXT: s_add_i32 s0, s0, s3
-; GFX6-NEXT: s_add_i32 s0, s0, 0x12d687
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s4, s4, 0x12d687
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_multi_use_constant_i32_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_add_i32 s0, s0, 0x12d687
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s4, s4, 0x12d687
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
%and = and i32 %a, 1234567
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 30a7864..39618b0 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -368,7 +368,10 @@ define amdgpu_ps float @test_clamp_v2bf16_s(<2 x bfloat> inreg %src) {
define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
; GCN-LABEL: test_clamp_bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp
+; GCN-NEXT: v_exp_bf16_e32 v0, v0
+; GCN-NEXT: v_nop
+; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1)
+; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
; GCN-NEXT: ; return to shader part epilog
%exp = call bfloat @llvm.exp2.bf16(bfloat %src)
%max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
index c14678c..c0d5f8a 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
@@ -120,17 +120,17 @@ define amdgpu_kernel void @s_ubfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 %
; SI-LABEL: s_ubfe_sub_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_sub_i32 s2, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s2
+; SI-NEXT: s_lshr_b32 s4, s4, s2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_mov_b32_e32 v2, s4
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_ubfe_sub_i32:
@@ -160,20 +160,20 @@ define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i
; SI-LABEL: s_ubfe_sub_multi_use_shl_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s6, 0
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s3, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s3
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_sub_i32 s3, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s3
+; SI-NEXT: s_lshr_b32 s5, s4, s3
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_mov_b32_e32 v2, s5
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
@@ -322,17 +322,17 @@ define amdgpu_kernel void @s_sbfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 %
; SI-LABEL: s_sbfe_sub_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_ashr_i32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_sub_i32 s2, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s2
+; SI-NEXT: s_ashr_i32 s4, s4, s2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_mov_b32_e32 v2, s4
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sbfe_sub_i32:
@@ -362,20 +362,20 @@ define amdgpu_kernel void @s_sbfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i
; SI-LABEL: s_sbfe_sub_multi_use_shl_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s6, 0
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_ashr_i32 s3, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s3
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_sub_i32 s3, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s3
+; SI-NEXT: s_ashr_i32 s5, s4, s3
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_mov_b32_e32 v2, s5
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
index bd76f34..7326adae 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
@@ -284,16 +284,15 @@ define amdgpu_kernel void @v_bfi_dont_applied_for_scalar_ops(ptr addrspace(1) %o
; GCN-LABEL: v_bfi_dont_applied_for_scalar_ops:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000
-; GCN-NEXT: s_and_b32 s2, s2, 0xffff
-; GCN-NEXT: s_or_b32 s2, s2, s3
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_or_b32 s4, s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%shift = lshr i32 %b, 16
%tr = trunc i32 %shift to i16
diff --git a/llvm/test/CodeGen/AMDGPU/bfm.ll b/llvm/test/CodeGen/AMDGPU/bfm.ll
index a12b5ea..172e07f 100644
--- a/llvm/test/CodeGen/AMDGPU/bfm.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfm.ll
@@ -6,14 +6,13 @@ define amdgpu_kernel void @s_bfm_pattern(ptr addrspace(1) %out, i32 %x, i32 %y)
; SI-LABEL: s_bfm_pattern:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bfm_b32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bfm_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_bfm_pattern:
diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse.ll b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
index d4f5617..e33b9ab 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
@@ -362,31 +362,29 @@ define amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32>
; SI-LABEL: s_brev_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_brev_b32 s0, s3
-; SI-NEXT: s_brev_b32 s1, s2
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_brev_b32 s5, s5
+; SI-NEXT: s_brev_b32 s4, s4
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: s_brev_v2i32:
; FLAT: ; %bb.0:
; FLAT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; FLAT-NEXT: s_mov_b32 s7, 0xf000
-; FLAT-NEXT: s_mov_b32 s6, -1
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
-; FLAT-NEXT: s_mov_b32 s4, s0
-; FLAT-NEXT: s_mov_b32 s5, s1
-; FLAT-NEXT: s_brev_b32 s0, s3
-; FLAT-NEXT: s_brev_b32 s1, s2
-; FLAT-NEXT: v_mov_b32_e32 v0, s1
-; FLAT-NEXT: v_mov_b32_e32 v1, s0
-; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; FLAT-NEXT: s_mov_b64 s[4:5], s[2:3]
+; FLAT-NEXT: s_brev_b32 s5, s5
+; FLAT-NEXT: s_brev_b32 s4, s4
+; FLAT-NEXT: s_mov_b32 s3, 0xf000
+; FLAT-NEXT: s_mov_b32 s2, -1
+; FLAT-NEXT: v_mov_b32_e32 v0, s4
+; FLAT-NEXT: v_mov_b32_e32 v1, s5
+; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; FLAT-NEXT: s_endpgm
;
; GISEL-LABEL: s_brev_v2i32:
@@ -405,16 +403,14 @@ define amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32>
; GFX11-FLAT-LABEL: s_brev_v2i32:
; GFX11-FLAT: ; %bb.0:
; GFX11-FLAT-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-FLAT-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-FLAT-NEXT: s_mov_b32 s6, -1
; GFX11-FLAT-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FLAT-NEXT: s_brev_b32 s2, s2
; GFX11-FLAT-NEXT: s_brev_b32 s3, s3
; GFX11-FLAT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FLAT-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-FLAT-NEXT: s_mov_b32 s4, s0
-; GFX11-FLAT-NEXT: s_mov_b32 s5, s1
-; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
+; GFX11-FLAT-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-FLAT-NEXT: s_mov_b32 s2, -1
+; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX11-FLAT-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: s_brev_v2i32:
diff --git a/llvm/test/CodeGen/AMDGPU/build_vector.ll b/llvm/test/CodeGen/AMDGPU/build_vector.ll
index 763f436..fbaaef0 100644
--- a/llvm/test/CodeGen/AMDGPU/build_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/build_vector.ll
@@ -255,16 +255,15 @@ define amdgpu_kernel void @build_v2i32_from_v4i16_shuffle(ptr addrspace(1) %out,
; GFX6-LABEL: build_v2i32_from_v4i16_shuffle:
; GFX6: ; %bb.0: ; %entry
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_lshl_b32 s1, s2, 16
-; GFX6-NEXT: v_mov_b32_e32 v0, s1
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: build_v2i32_from_v4i16_shuffle:
diff --git a/llvm/test/CodeGen/AMDGPU/cc-entry.ll b/llvm/test/CodeGen/AMDGPU/cc-entry.ll
new file mode 100644
index 0000000..d807f32
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/cc-entry.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s
+
+define amdgpu_kernel void @entry_fn() {
+; CHECK-LABEL: entry_fn:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_sext_i32_i16 s5, s5
+; CHECK-NEXT: s_add_co_u32 s4, s4, entry_fn@gotpcrel32@lo+8
+; CHECK-NEXT: s_add_co_ci_u32 s5, s5, entry_fn@gotpcrel32@hi+16
+; CHECK-NEXT: s_mov_b32 s32, 0
+; CHECK-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT: s_endpgm
+entry:
+ call void @entry_fn()
+ ret void
+}
+
+define void @caller() {
+; CHECK-LABEL: caller:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_mov_b32 s0, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_or_saveexec_b32 s1, -1
+; CHECK-NEXT: scratch_store_b32 off, v40, s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 exec_lo, s1
+; CHECK-NEXT: s_add_co_i32 s32, s32, 16
+; CHECK-NEXT: v_writelane_b32 v40, s0, 2
+; CHECK-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s5, s5
+; CHECK-NEXT: s_add_co_u32 s4, s4, entry_fn@gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s5, s5, entry_fn@gotpcrel32@hi+24
+; CHECK-NEXT: v_mov_b32_e32 v0, v31
+; CHECK-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; CHECK-NEXT: v_writelane_b32 v40, s30, 0
+; CHECK-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CHECK-NEXT: s_mov_b64 s[6:7], s[10:11]
+; CHECK-NEXT: v_writelane_b32 v40, s31, 1
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s31, v40, 1
+; CHECK-NEXT: v_readlane_b32 s30, v40, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: v_readlane_b32 s0, v40, 2
+; CHECK-NEXT: s_or_saveexec_b32 s1, -1
+; CHECK-NEXT: scratch_load_b32 v40, off, s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 exec_lo, s1
+; CHECK-NEXT: s_mov_b32 s33, s0
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ call void @entry_fn()
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 3d315f8..4cbd41c 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -647,20 +647,20 @@ define amdgpu_kernel void @sub_zext_setcc_commute(ptr addrspace(1) nocapture %ar
; GCN-LABEL: sub_zext_setcc_commute:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b64 s[4:5], s[0:1]
-; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0
+; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: sub_zext_setcc_commute:
@@ -696,20 +696,20 @@ define amdgpu_kernel void @sub_sext_setcc_commute(ptr addrspace(1) nocapture %ar
; GCN-LABEL: sub_sext_setcc_commute:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b64 s[4:5], s[0:1]
-; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0
+; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: sub_sext_setcc_commute:
diff --git a/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll
new file mode 100644
index 0000000..244c3f7c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+define i32 @known_positive(float nofpclass(nan ninf nzero nsub nnorm) %signbit.zero) #0 {
+; CHECK-LABEL: known_positive:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.zero to i32
+ %and = and i32 %cast, 2147483647
+ ret i32 %and
+}
+
+define i32 @known_positive_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %signbit.zero) #0 {
+; CHECK-LABEL: known_positive_maybe_nan:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.zero to i32
+ %and = and i32 %cast, 2147483647
+ ret i32 %and
+}
+
+define i32 @known_negative(float nofpclass(nan pinf pzero psub pnorm) %signbit.one) #0 {
+; CHECK-LABEL: known_negative:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.one to i32
+ %or = or i32 %cast, -2147483648
+ ret i32 %or
+}
+
+define i32 @known_negative_maybe_nan(float nofpclass(pinf pzero psub pnorm) %signbit.one) #0 {
+; CHECK-LABEL: known_negative_maybe_nan:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.one to i32
+ %or = or i32 %cast, -2147483648
+ ret i32 %or
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
index f5227ee..ef676dd 100644
--- a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
@@ -345,15 +345,13 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) {
; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v4, vcc
; GFX9-NEXT: v_fma_f32 v2, v2, v1, v3
-; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1
; GFX9-NEXT: v_exp_f32_e32 v2, v2
+; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1
; GFX9-NEXT: v_not_b32_e32 v3, 63
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
; GFX9-NEXT: v_ldexp_f32 v2, v2, v3
-; GFX9-NEXT: v_and_b32_e32 v0, v1, v0
-; GFX9-NEXT: s_brev_b32 s4, -2
-; GFX9-NEXT: v_bfi_b32 v0, s4, v2, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%y = sitofp i32 %y.i to float
%y.fptosi = fptosi float %y to i32
@@ -370,4 +368,109 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) {
ret float %pow_sign1
}
+define double @test_pow_fast_f64integral_y(double %x, i32 %y.i) #0 {
+; GFX9-LABEL: test_pow_fast_f64integral_y:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s16, s33
+; GFX9-NEXT: s_mov_b32 s33, s32
+; GFX9-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX9-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-NEXT: v_writelane_b32 v43, s16, 14
+; GFX9-NEXT: v_writelane_b32 v43, s30, 0
+; GFX9-NEXT: v_writelane_b32 v43, s31, 1
+; GFX9-NEXT: v_writelane_b32 v43, s34, 2
+; GFX9-NEXT: v_writelane_b32 v43, s35, 3
+; GFX9-NEXT: v_writelane_b32 v43, s36, 4
+; GFX9-NEXT: v_writelane_b32 v43, s37, 5
+; GFX9-NEXT: v_writelane_b32 v43, s38, 6
+; GFX9-NEXT: v_writelane_b32 v43, s39, 7
+; GFX9-NEXT: v_writelane_b32 v43, s48, 8
+; GFX9-NEXT: v_writelane_b32 v43, s49, 9
+; GFX9-NEXT: v_writelane_b32 v43, s50, 10
+; GFX9-NEXT: s_addk_i32 s32, 0x800
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill
+; GFX9-NEXT: v_writelane_b32 v43, s51, 11
+; GFX9-NEXT: v_mov_b32_e32 v42, v1
+; GFX9-NEXT: v_writelane_b32 v43, s52, 12
+; GFX9-NEXT: v_and_b32_e32 v1, 0x7fffffff, v42
+; GFX9-NEXT: s_getpc_b64 s[16:17]
+; GFX9-NEXT: s_add_u32 s16, s16, _Z4log2d@rel32@lo+4
+; GFX9-NEXT: s_addc_u32 s17, s17, _Z4log2d@rel32@hi+12
+; GFX9-NEXT: v_writelane_b32 v43, s53, 13
+; GFX9-NEXT: v_mov_b32_e32 v40, v31
+; GFX9-NEXT: v_mov_b32_e32 v41, v2
+; GFX9-NEXT: s_mov_b32 s50, s15
+; GFX9-NEXT: s_mov_b32 s51, s14
+; GFX9-NEXT: s_mov_b32 s52, s13
+; GFX9-NEXT: s_mov_b32 s53, s12
+; GFX9-NEXT: s_mov_b64 s[34:35], s[10:11]
+; GFX9-NEXT: s_mov_b64 s[36:37], s[8:9]
+; GFX9-NEXT: s_mov_b64 s[38:39], s[6:7]
+; GFX9-NEXT: s_mov_b64 s[48:49], s[4:5]
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: v_cvt_f64_i32_e32 v[2:3], v41
+; GFX9-NEXT: s_getpc_b64 s[16:17]
+; GFX9-NEXT: s_add_u32 s16, s16, _Z4exp2d@rel32@lo+4
+; GFX9-NEXT: s_addc_u32 s17, s17, _Z4exp2d@rel32@hi+12
+; GFX9-NEXT: s_mov_b64 s[4:5], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[6:7], s[38:39]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_mov_b64 s[8:9], s[36:37]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s53
+; GFX9-NEXT: s_mov_b32 s13, s52
+; GFX9-NEXT: s_mov_b32 s14, s51
+; GFX9-NEXT: s_mov_b32 s15, s50
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v41
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v42
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX9-NEXT: v_readlane_b32 s53, v43, 13
+; GFX9-NEXT: v_readlane_b32 s52, v43, 12
+; GFX9-NEXT: v_readlane_b32 s51, v43, 11
+; GFX9-NEXT: v_readlane_b32 s50, v43, 10
+; GFX9-NEXT: v_readlane_b32 s49, v43, 9
+; GFX9-NEXT: v_readlane_b32 s48, v43, 8
+; GFX9-NEXT: v_readlane_b32 s39, v43, 7
+; GFX9-NEXT: v_readlane_b32 s38, v43, 6
+; GFX9-NEXT: v_readlane_b32 s37, v43, 5
+; GFX9-NEXT: v_readlane_b32 s36, v43, 4
+; GFX9-NEXT: v_readlane_b32 s35, v43, 3
+; GFX9-NEXT: v_readlane_b32 s34, v43, 2
+; GFX9-NEXT: v_readlane_b32 s31, v43, 1
+; GFX9-NEXT: v_readlane_b32 s30, v43, 0
+; GFX9-NEXT: s_mov_b32 s32, s33
+; GFX9-NEXT: v_readlane_b32 s4, v43, 14
+; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b64 exec, s[6:7]
+; GFX9-NEXT: s_mov_b32 s33, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %fabs = call fast double @llvm.fabs.f64(double %x)
+ %log2 = call fast double @_Z4log2d(double %fabs)
+ %pownI2F = sitofp i32 %y.i to double
+ %ylogx = fmul fast double %log2, %pownI2F
+ %exp2 = call fast nofpclass(nan ninf nzero nsub nnorm) double @_Z4exp2d(double %ylogx)
+ %ytou = zext i32 %y.i to i64
+ %yeven = shl i64 %ytou, 63
+ %x.i64 = bitcast double %x to i64
+ %pow_sign = and i64 %yeven, %x.i64
+ %pow_sign.f64 = bitcast i64 %pow_sign to double
+ %pow_sign1 = call fast double @llvm.copysign.f64(double %exp2, double %pow_sign.f64)
+ ret double %pow_sign1
+}
+
+declare hidden double @_Z4exp2d(double) #1
+declare hidden double @_Z4log2d(double) #1
+
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #1 = { norecurse nounwind memory(read) }
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll
new file mode 100644
index 0000000..afd610f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
+; Negative test, don't know %x is positive
+define half @copysign_known_signmask_f16(half %x, i16 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i16 %sign, 15
+ %signmask.bitcast = bitcast i16 %signmask to half
+ %result = call half @llvm.copysign.f16(half %x, half %signmask.bitcast)
+ ret half %result
+}
+
+; Negative test, don't know %x is positive
+define float @copysign_known_signmask_f32(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %x, float %signmask.bitcast)
+ ret float %result
+}
+
+; Negative test, don't know %x is positive
+define double @copysign_known_signmask_f64(double %x, i64 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v1, s4, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i64 %sign, 63
+ %signmask.bitcast = bitcast i64 %signmask to double
+ %result = call double @llvm.copysign.f64(double %x, double %signmask.bitcast)
+ ret double %result
+}
+
+; Negative test, a nan magnitude may still have its sign bit set
+define float @copysign_known_signmask_f32_not_known_positive_mag_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_not_known_positive_mag_maybe_nan:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+; Negative test, the magnitude may be negative zero
+define float @copysign_known_signmask_f32_not_known_positive_mag_maybe_negzero(float nofpclass(nan ninf nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_not_known_positive_mag_maybe_negzero:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define half @copysign_known_signmask_f16_known_positive_mag(half nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i16 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f16_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i16 %sign, 15
+ %signmask.bitcast = bitcast i16 %signmask to half
+ %result = call half @llvm.copysign.f16(half %sign.bit.known.zero, half %signmask.bitcast)
+ ret half %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define double @copysign_known_signmask_f64_known_positive_mag(double nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i64 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f64_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i64 %sign, 63
+ %signmask.bitcast = bitcast i64 %signmask to double
+ %result = call double @llvm.copysign.f64(double %sign.bit.known.zero, double %signmask.bitcast)
+ ret double %result
+}
+
+; exp always returns a positive result; only a nan result would have an
+; unknown sign bit, and the nnan flag rules that out.
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2aeac50
+; GFX9-NEXT: v_add_f32_e32 v2, 0x42800000, v0
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_mul_f32_e32 v2, 0x114b4ea4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp2(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_not_b32_e32 v2, 63
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp10(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp10:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_not_b32_e32 v2, 63
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag_through_fence(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag_through_fence:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: ;ARITH_FENCE
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %fence = call float @llvm.arithmetic.fence.f32(float %sign.bit.known.zero)
+ %result = call float @llvm.copysign.f32(float %fence, float %signmask.bitcast)
+ ret float %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
index ab96dcf..08545b9 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
@@ -390,16 +390,15 @@ define amdgpu_kernel void @uniform_vec_i16_LH(ptr addrspace(1) %out, i16 %a, i32
; GCN-LABEL: uniform_vec_i16_LH:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000
-; GCN-NEXT: s_and_b32 s2, s2, 0xffff
-; GCN-NEXT: s_or_b32 s2, s2, s3
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_or_b32 s4, s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: uniform_vec_i16_LH:
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
index 4c3fd40..d8f9bc1 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i8_to_i32(ptr addrspace(1) %out,
; GCN-LABEL: uniform_sext_in_reg_i8_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i8 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_add_i32 s2, s4, s5
+; GCN-NEXT: s_sext_i32_i8 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 24
@@ -26,15 +25,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i8_to_i32(ptr addrspace(1) %out
; GCN-LABEL: divergent_sext_in_reg_i8_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: s_add_i32 s0, s2, s3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: s_add_i32 s4, s4, s5
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%c = add i32 %a, %b ; add to prevent folding into extload
@@ -49,15 +47,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i16_to_i32(ptr addrspace(1) %out,
; GCN-LABEL: uniform_sext_in_reg_i16_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i16 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_add_i32 s2, s4, s5
+; GCN-NEXT: s_sext_i32_i16 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 16
@@ -70,15 +67,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i16_to_i32(ptr addrspace(1) %ou
; GCN-LABEL: divergent_sext_in_reg_i16_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: s_add_i32 s0, s2, s3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: s_add_i32 s4, s4, s5
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%c = add i32 %a, %b ; add to prevent folding into extload
diff --git a/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll b/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll
new file mode 100644
index 0000000..08730038
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=debugify < %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 | FileCheck %s
+
+@lds = addrspace(3) global [512 x float] poison, align 4
+
+define amdgpu_kernel void @simple_write2_one_val_f32(ptr addrspace(1) %C, ptr addrspace(1) %in) #0 {
+; CHECK-LABEL: simple_write2_one_val_f32:
+; CHECK: .Lfunc_begin0:
+; CHECK-NEXT: .cfi_sections .debug_frame
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: .file 1 "/" "<stdin>"
+; CHECK-NEXT: .loc 1 1 1 prologue_end ; <stdin>:1:1
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:1 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:5 <- [DW_OP_plus_uconst 8, DW_OP_stack_value] $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:3 <- undef
+; CHECK-NEXT: .loc 1 2 1 ; <stdin>:2:1
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:4 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:2 <- undef
+; CHECK-NEXT: .loc 1 3 1 ; <stdin>:3:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dword v1, v0, s[0:1]
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:6 <- [DW_OP_plus_uconst 32, DW_OP_stack_value] $vgpr0
+; CHECK-NEXT: .loc 1 0 0 is_stmt 0 ; <stdin>:0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b32 v0, v1, v1 offset1:8
+; CHECK-NEXT: .loc 1 9 1 is_stmt 1 ; <stdin>:9:1
+; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: .Ltmp3:
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %in.gep = getelementptr float, ptr addrspace(1) %in, i32 %x.i
+ %val = load float, ptr addrspace(1) %in.gep, align 4
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ store float %val, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ store float %val, ptr addrspace(3) %arrayidx1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32(ptr addrspace(1) %out) #0 {
+; CHECK-LABEL: simple_read2_f32:
+; CHECK: .Lfunc_begin1:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: .loc 1 11 1 prologue_end ; <stdin>:11:1
+; CHECK-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:8 <- $vgpr2
+; CHECK-NEXT: .loc 1 0 0 is_stmt 0 ; <stdin>:0
+; CHECK-NEXT: ds_read2_b32 v[0:1], v2 offset1:8
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:9 <- undef
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:11 <- [DW_OP_plus_uconst 32, DW_OP_stack_value] $vgpr2
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:12 <- undef
+; CHECK-NEXT: .loc 1 10 1 is_stmt 1 ; <stdin>:10:1
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:7 <- undef
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:10 <- [DW_OP_plus_uconst 8, DW_OP_stack_value] undef
+; CHECK-NEXT: .loc 1 16 1 ; <stdin>:16:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:13 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:14 <- undef
+; CHECK-NEXT: .loc 1 18 1 ; <stdin>:18:1
+; CHECK-NEXT: global_store_dword v2, v0, s[0:1]
+; CHECK-NEXT: .loc 1 19 1 ; <stdin>:19:1
+; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: .Ltmp8:
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.ll b/llvm/test/CodeGen/AMDGPU/fabs.ll
index 6bcb086..97e23fc 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.ll
@@ -99,16 +99,15 @@ define amdgpu_kernel void @fabs_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-LABEL: fabs_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_and_b32 s0, s3, 0x7fffffff
-; SI-NEXT: s_and_b32 s1, s2, 0x7fffffff
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_bitset0_b32 s5, 31
+; SI-NEXT: s_bitset0_b32 s4, 31
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fabs_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.ll b/llvm/test/CodeGen/AMDGPU/fdiv.ll
index b826e6c..4d448e6 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.ll
@@ -333,18 +333,17 @@ define amdgpu_kernel void @s_fdiv_25ulp_f32(ptr addrspace(1) %out, float %a, flo
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX67-NEXT: v_mov_b32_e32 v0, 0x6f800000
; GFX67-NEXT: v_mov_b32_e32 v1, 0x2f800000
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s3|, v0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s5|, v0
; GFX67-NEXT: v_cndmask_b32_e32 v0, 1.0, v1, vcc
-; GFX67-NEXT: v_mul_f32_e32 v1, s3, v0
+; GFX67-NEXT: v_mul_f32_e32 v1, s5, v0
; GFX67-NEXT: v_rcp_f32_e32 v1, v1
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v1, s2, v1
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v1, s4, v1
; GFX67-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_25ulp_f32:
@@ -441,20 +440,19 @@ define amdgpu_kernel void @s_fdiv_25ulp_ieee_f32(ptr addrspace(1) %out, float %a
; GFX7-LABEL: s_fdiv_25ulp_ieee_f32:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s3
+; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s5
; GFX7-NEXT: v_rcp_f32_e32 v0, v0
-; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s3
-; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s2
-; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s2
+; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s5
+; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s4
+; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s4
; GFX7-NEXT: v_mul_f32_e32 v0, v3, v0
; GFX7-NEXT: v_sub_i32_e32 v1, vcc, v2, v1
-; GFX7-NEXT: s_mov_b32 s4, s0
-; GFX7-NEXT: s_mov_b32 s5, s1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: v_ldexp_f32_e32 v0, v0, v1
-; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_25ulp_ieee_f32:
@@ -528,14 +526,13 @@ define amdgpu_kernel void @s_fdiv_fast_ieee_f32(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_fast_ieee_f32:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_fast_ieee_f32:
@@ -590,14 +587,13 @@ define amdgpu_kernel void @s_fdiv_f32_fast_math(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_f32_fast_math:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_f32_fast_math:
@@ -652,14 +648,13 @@ define amdgpu_kernel void @s_fdiv_ulp25_f32_fast_math(ptr addrspace(1) %out, flo
; GFX67-LABEL: s_fdiv_ulp25_f32_fast_math:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_ulp25_f32_fast_math:
@@ -877,14 +872,13 @@ define amdgpu_kernel void @s_fdiv_f32_arcp_ninf(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_f32_arcp_ninf:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_f32_arcp_ninf:
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
index defcffa..39eefa1 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -75,9 +75,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out
; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src:
; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
-; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
-; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
+; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}}
+; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0
+; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0
+; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
+; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
@@ -96,8 +99,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1)
; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast:
; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
-; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
-; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
+; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}}
+; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0
+; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0
+
+; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
+; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
index a025c36..6c2ab5f 100644
--- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -121,14 +121,13 @@ define amdgpu_kernel void @fnearbyint_v2f32(ptr addrspace(1) %out, <2 x float> %
; SICI-LABEL: fnearbyint_v2f32:
; SICI: ; %bb.0: ; %entry
; SICI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SICI-NEXT: s_mov_b32 s7, 0xf000
-; SICI-NEXT: s_mov_b32 s6, -1
; SICI-NEXT: s_waitcnt lgkmcnt(0)
-; SICI-NEXT: s_mov_b32 s4, s0
-; SICI-NEXT: s_mov_b32 s5, s1
-; SICI-NEXT: v_rndne_f32_e32 v1, s3
-; SICI-NEXT: v_rndne_f32_e32 v0, s2
-; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SICI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SICI-NEXT: s_mov_b32 s3, 0xf000
+; SICI-NEXT: s_mov_b32 s2, -1
+; SICI-NEXT: v_rndne_f32_e32 v1, s5
+; SICI-NEXT: v_rndne_f32_e32 v0, s4
+; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SICI-NEXT: s_endpgm
;
; VI-LABEL: fnearbyint_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
index 1fa9bfa..214cced 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
@@ -199,16 +199,15 @@ define amdgpu_kernel void @fneg_fabsf_v2f32(ptr addrspace(1) %out, <2 x float> %
; SI-LABEL: fneg_fabsf_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bitset1_b32 s3, 31
-; SI-NEXT: s_bitset1_b32 s2, 31
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bitset1_b32 s5, 31
+; SI-NEXT: s_bitset1_b32 s4, 31
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fneg_fabsf_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.ll b/llvm/test/CodeGen/AMDGPU/fneg.ll
index c3f4ebe3..0223515 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.ll
@@ -52,16 +52,15 @@ define amdgpu_kernel void @s_fneg_v2f32(ptr addrspace(1) nocapture %out, <2 x fl
; SI-LABEL: s_fneg_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_xor_b32 s0, s3, 0x80000000
-; SI-NEXT: s_xor_b32 s1, s2, 0x80000000
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_xor_b32 s5, s5, 0x80000000
+; SI-NEXT: s_xor_b32 s4, s4, 0x80000000
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_fneg_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
index 7ab8b30..0c5ed00 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -88,27 +88,24 @@ define amdgpu_kernel void @fp_to_sint_v2i32(ptr addrspace(1) %out, <2 x float> %
; SI-LABEL: fp_to_sint_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_cvt_i32_f32_e32 v1, s3
-; SI-NEXT: v_cvt_i32_f32_e32 v0, s2
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cvt_i32_f32_e32 v1, s5
+; SI-NEXT: v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_sint_v2i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_i32_f32_e32 v1, s3
; VI-NEXT: v_cvt_i32_f32_e32 v0, s2
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_sint_v2i32:
@@ -294,26 +291,25 @@ entry:
define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %x) {
; SI-LABEL: fp_to_sint_v2i64:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s6, 0x2f800000
+; SI-NEXT: s_mov_b32 s7, 0xcf800000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s8, 0x2f800000
-; SI-NEXT: s_mov_b32 s9, 0xcf800000
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_trunc_f32_e32 v0, s7
-; SI-NEXT: v_trunc_f32_e32 v1, s6
-; SI-NEXT: v_mul_f32_e64 v2, |v0|, s8
+; SI-NEXT: v_trunc_f32_e32 v0, s5
+; SI-NEXT: v_trunc_f32_e32 v1, s4
+; SI-NEXT: v_mul_f32_e64 v2, |v0|, s6
; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v0
-; SI-NEXT: v_mul_f32_e64 v4, |v1|, s8
+; SI-NEXT: v_mul_f32_e64 v4, |v1|, s6
; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v1
; SI-NEXT: v_floor_f32_e32 v2, v2
; SI-NEXT: v_floor_f32_e32 v4, v4
; SI-NEXT: v_cvt_u32_f32_e32 v6, v2
-; SI-NEXT: v_fma_f32 v0, v2, s9, |v0|
+; SI-NEXT: v_fma_f32 v0, v2, s7, |v0|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v4
-; SI-NEXT: v_fma_f32 v1, v4, s9, |v1|
+; SI-NEXT: v_fma_f32 v1, v4, s7, |v1|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
; SI-NEXT: v_xor_b32_e32 v4, v6, v3
; SI-NEXT: v_cvt_u32_f32_e32 v1, v1
@@ -330,36 +326,35 @@ define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %
; VI-LABEL: fp_to_sint_v2i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s8, 0x2f800000
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s6, 0x2f800000
+; VI-NEXT: s_mov_b32 s7, 0xcf800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_trunc_f32_e32 v0, s3
-; VI-NEXT: v_mul_f32_e64 v1, |v0|, s8
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: v_trunc_f32_e32 v0, s5
+; VI-NEXT: v_mul_f32_e64 v1, |v0|, s6
; VI-NEXT: v_floor_f32_e32 v1, v1
-; VI-NEXT: s_mov_b32 s0, 0xcf800000
-; VI-NEXT: v_fma_f32 v2, v1, s0, |v0|
-; VI-NEXT: v_trunc_f32_e32 v4, s2
-; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
-; VI-NEXT: v_mul_f32_e64 v3, |v4|, s8
-; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
-; VI-NEXT: v_floor_f32_e32 v3, v3
-; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
-; VI-NEXT: v_fma_f32 v3, v3, s0, |v4|
+; VI-NEXT: v_cvt_u32_f32_e32 v2, v1
+; VI-NEXT: v_fma_f32 v1, v1, s7, |v0|
; VI-NEXT: v_ashrrev_i32_e32 v0, 31, v0
-; VI-NEXT: v_cvt_u32_f32_e32 v6, v3
-; VI-NEXT: v_xor_b32_e32 v2, v2, v0
+; VI-NEXT: v_trunc_f32_e32 v4, s4
+; VI-NEXT: v_xor_b32_e32 v3, v2, v0
+; VI-NEXT: v_mul_f32_e64 v2, |v4|, s6
+; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; VI-NEXT: v_floor_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v2
+; VI-NEXT: v_fma_f32 v2, v2, s7, |v4|
+; VI-NEXT: v_cvt_u32_f32_e32 v6, v2
; VI-NEXT: v_xor_b32_e32 v1, v1, v0
-; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v0
-; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v0, vcc
+; VI-NEXT: v_sub_u32_e32 v2, vcc, v1, v0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v4
+; VI-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc
; VI-NEXT: v_xor_b32_e32 v0, v6, v1
; VI-NEXT: v_xor_b32_e32 v4, v5, v1
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_sint_v2i64:
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
index 5428ba8..c938475 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -48,27 +48,24 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: fp_to_uint_v2f32_to_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_cvt_u32_f32_e32 v1, s3
-; SI-NEXT: v_cvt_u32_f32_e32 v0, s2
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cvt_u32_f32_e32 v1, s5
+; SI-NEXT: v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_uint_v2f32_to_v2i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_u32_f32_e32 v1, s3
; VI-NEXT: v_cvt_u32_f32_e32 v0, s2
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_uint_v2f32_to_v2i32:
@@ -241,32 +238,29 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x
; SI-LABEL: fp_to_uint_v2f32_to_v2i64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s8, 0xcf800000
+; SI-NEXT: s_mov_b32 s6, 0xcf800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_trunc_f32_e32 v0, s3
-; SI-NEXT: v_trunc_f32_e32 v2, s2
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_trunc_f32_e32 v0, s5
+; SI-NEXT: v_trunc_f32_e32 v2, s4
; SI-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; SI-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
; SI-NEXT: v_floor_f32_e32 v4, v1
; SI-NEXT: v_floor_f32_e32 v5, v3
; SI-NEXT: v_cvt_u32_f32_e32 v3, v4
; SI-NEXT: v_cvt_u32_f32_e32 v1, v5
-; SI-NEXT: v_fma_f32 v0, v4, s8, v0
-; SI-NEXT: v_fma_f32 v4, v5, s8, v2
+; SI-NEXT: v_fma_f32 v0, v4, s6, v0
+; SI-NEXT: v_fma_f32 v4, v5, s6, v2
; SI-NEXT: v_cvt_u32_f32_e32 v2, v0
; SI-NEXT: v_cvt_u32_f32_e32 v0, v4
-; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_uint_v2f32_to_v2i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_trunc_f32_e32 v0, s3
; VI-NEXT: v_trunc_f32_e32 v4, s2
@@ -281,9 +275,9 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x
; VI-NEXT: v_cvt_u32_f32_e32 v3, v5
; VI-NEXT: v_cvt_u32_f32_e32 v1, v6
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index ed1ee45..68b95cd 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -691,17 +691,16 @@ define amdgpu_kernel void @orxor2or1(ptr addrspace(1) %in, i32 %a, i32 %b) {
; SI-LABEL: orxor2or1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_lshl_b32 s0, s2, 7
-; SI-NEXT: s_or_b32 s0, s3, s0
-; SI-NEXT: s_cmp_eq_u32 s0, 0
-; SI-NEXT: s_cselect_b32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_lshl_b32 s6, s4, 7
+; SI-NEXT: s_or_b32 s6, s5, s6
+; SI-NEXT: s_cmp_eq_u32 s6, 0
+; SI-NEXT: s_cselect_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: orxor2or1:
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 44bd409..7cbf9ae 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1508,35 +1508,33 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2
; SI-LABEL: dynamic_insertelement_v2i16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; SI-NEXT: s_mov_b32 s7, 0x100f000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_lshl_b32 s1, s3, 4
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_xor_b32 s0, s2, 0x50005
-; SI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; SI-NEXT: s_and_b32 s0, s0, s1
-; SI-NEXT: s_xor_b32 s0, s0, s2
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_lshl_b32 s5, s5, 4
+; SI-NEXT: s_xor_b32 s6, s4, 0x50005
+; SI-NEXT: s_lshl_b32 s5, 0xffff, s5
+; SI-NEXT: s_and_b32 s5, s6, s5
+; SI-NEXT: s_xor_b32 s4, s5, s4
+; SI-NEXT: s_mov_b32 s3, 0x100f000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_insertelement_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; VI-NEXT: s_mov_b32 s7, 0x1100f000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshl_b32 s1, s3, 4
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_xor_b32 s0, s2, 0x50005
-; VI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; VI-NEXT: s_and_b32 s0, s0, s1
-; VI-NEXT: s_xor_b32 s0, s0, s2
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshl_b32 s5, s5, 4
+; VI-NEXT: s_xor_b32 s6, s4, 0x50005
+; VI-NEXT: s_lshl_b32 s5, 0xffff, s5
+; VI-NEXT: s_and_b32 s5, s6, s5
+; VI-NEXT: s_xor_b32 s4, s5, s4
+; VI-NEXT: s_mov_b32 s3, 0x1100f000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
store <2 x i16> %vecins, ptr addrspace(1) %out, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
index d4aa2051..e421e2c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
@@ -1612,29 +1612,27 @@ define amdgpu_kernel void @v_lshr_and(ptr addrspace(1) %out, i32 %a, i32 %b) #0
; SI-LABEL: v_lshr_and:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s2, s2, s3
-; SI-NEXT: s_and_b32 s2, s2, 7
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_lshr_b32 s2, s4, s5
+; SI-NEXT: s_and_b32 s4, s2, 7
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: v_lshr_and:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_lshr_b32 s0, s2, s3
-; VI-NEXT: s_and_b32 s0, s0, 7
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s4, s4, s5
+; VI-NEXT: s_and_b32 s4, s4, 7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%c = lshr i32 %a, %b
%d = and i32 %c, 7
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index ac356fa..3897a0e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -520,42 +520,41 @@ define amdgpu_kernel void @s_exp_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
;
; SI-SDAG-LABEL: s_exp_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x3fb8aa3b
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x32a5705f
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0
; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2
; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4
-; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5
; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0
; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6
; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc2ce8ed0
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x42b17218
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v4
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index d12ebe4..3928ec2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -522,42 +522,41 @@ define amdgpu_kernel void @s_exp10_v2f32(ptr addrspace(1) %out, <2 x float> %in)
;
; SI-SDAG-LABEL: s_exp10_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x40549a78
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x33979a37
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0
; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2
; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4
-; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5
; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0
; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6
; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc23369f4
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x421a209b
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v4
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
index e30a586..dd44a1a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -176,26 +176,25 @@ define amdgpu_kernel void @s_exp2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0xc2fc0000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42800000
-; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s6, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, s0
-; SI-SDAG-NEXT: s_mov_b32 s5, s1
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: v_add_f32_e32 v2, s3, v2
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s2, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_add_f32_e32 v0, s2, v0
+; SI-SDAG-NEXT: v_add_f32_e32 v2, s5, v2
+; SI-SDAG-NEXT: v_add_f32_e32 v0, s4, v0
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0
-; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0
-; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s0
-; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-SDAG-NEXT: s_cselect_b32 s6, 0xffffffc0, 0
+; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s4, 0xffffffc0, 0
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
+; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s6
+; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s4
+; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: s_exp2_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index b5038c8..fc6b2d9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log_f32(ptr addrspace(1) %out, float %in) {
define amdgpu_kernel void @s_log_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-SDAG-LABEL: s_log_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x41b17218
-; SI-SDAG-NEXT: s_mov_b32 s8, 0x3377d1cf
+; SI-SDAG-NEXT: s_mov_b32 s8, 0x3f317217
; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s7, v0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0
-; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
+; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0
+; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-SDAG-NEXT: s_mov_b32 s7, 0x3f317217
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3f317217, v3
-; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4
+; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0
+; SI-SDAG-NEXT: s_mov_b32 s5, 0x3377d1cf
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1
+; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1
; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5
; SI-SDAG-NEXT: v_log_f32_e32 v5, v1
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3f317217, v5
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 7465b49..a141bce 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log10_f32(ptr addrspace(1) %out, float %in) {
define amdgpu_kernel void @s_log10_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-SDAG-LABEL: s_log10_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x411a209b
-; SI-SDAG-NEXT: s_mov_b32 s8, 0x3284fbcf
+; SI-SDAG-NEXT: s_mov_b32 s8, 0x3e9a209a
; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s7, v0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0
-; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
+; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0
+; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-SDAG-NEXT: s_mov_b32 s7, 0x3e9a209a
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3e9a209a, v3
-; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4
+; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0
+; SI-SDAG-NEXT: s_mov_b32 s5, 0x3284fbcf
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1
+; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1
; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5
; SI-SDAG-NEXT: v_log_f32_e32 v5, v1
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3e9a209a, v5
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
index 61a777f..b1407d3 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -221,8 +221,6 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42000000
-; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s6, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0
; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
@@ -238,11 +236,11 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s2, v1
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v4, v1
-; SI-SDAG-NEXT: s_mov_b32 s4, s0
-; SI-SDAG-NEXT: s_mov_b32 s5, s1
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v3, v2
; SI-SDAG-NEXT: v_sub_f32_e32 v0, v4, v0
-; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: s_log2_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 6dc9199..b6eaaf1 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -326,12 +326,12 @@ define void @local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -347,12 +347,12 @@ define void @local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB2_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -440,12 +440,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB3_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -462,12 +462,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB3_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -880,13 +880,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -913,13 +914,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -936,14 +938,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB6_1
@@ -968,13 +970,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB6_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -990,13 +992,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB6_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1012,13 +1014,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB6_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1034,13 +1036,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v2, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB6_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1063,13 +1065,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1096,13 +1099,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1119,14 +1123,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB7_1
@@ -1151,13 +1155,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1173,13 +1177,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1195,13 +1199,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB7_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1218,13 +1222,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], 4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v3, v0
+; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], 4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v0, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB7_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2032,27 +2036,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2073,28 +2077,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2119,15 +2123,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2140,27 +2144,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2175,28 +2179,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2211,23 +2215,23 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2249,15 +2253,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2278,15 +2282,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,16 +2312,16 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2338,18 +2342,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2370,18 +2374,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2412,19 +2416,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2455,19 +2459,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2493,15 +2498,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2524,19 +2529,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2561,19 +2566,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2596,16 +2602,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2628,15 +2634,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2658,15 +2664,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2689,16 +2695,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2720,18 +2726,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2753,18 +2759,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3086,16 +3092,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3118,16 +3124,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -3147,13 +3154,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3168,16 +3175,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3194,16 +3201,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3220,15 +3228,15 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -3245,13 +3253,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3267,13 +3275,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3289,14 +3297,14 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3312,16 +3320,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3338,16 +3346,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
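; A minimal IR sketch of what the @local_atomic_fadd_noret_f16__offset__align4
; hunks above exercise, reconstructed from the function name in the hunk
; headers, the 4.0 immediate, and the offset:65534 operand; the test's actual
; IR (in particular the atomic ordering) may differ:
define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr) {
  ; 32767 half elements = 65534 bytes, matching the folded DS offset above
  %gep = getelementptr half, ptr addrspace(3) %ptr, i32 32767
  ; result unused ("noret"); align 4 lets the backend RMW a whole dword
  %unused = atomicrmw fadd ptr addrspace(3) %gep, half 4.0 seq_cst, align 4
  ret void
}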
@@ -4297,38 +4305,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4349,37 +4357,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4405,22 +4413,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4433,38 +4441,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4479,37 +4487,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4524,28 +4532,28 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4568,20 +4576,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4603,20 +4611,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4638,22 +4646,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4674,18 +4682,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4706,18 +4714,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4748,29 +4756,30 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4801,28 +4810,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4849,22 +4859,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4888,28 +4898,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4935,27 +4946,28 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4978,21 +4990,21 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -5016,20 +5028,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5052,20 +5064,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5088,22 +5100,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5125,18 +5137,18 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5158,18 +5170,18 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
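; The recurring v_bfe_u32 / v_add3_u32 0x7fff / v_cndmask sequence in the bf16
; hunks above is the usual f32-to-bf16 round-to-nearest-even truncation: the
; v_bfe_u32 extracts bit 16 (the LSB of the truncated bf16 value) so it can be
; folded into the 0x7fff rounding bias, while the 0x400000 OR and the unordered
; v_cmp_u_f32 compare quiet any NaN result before the high half is taken.
; A minimal IR sketch of what the @local_atomic_fadd_noret_bf16__offset hunks
; exercise; the signature is from the hunk headers, while the element offset
; and atomic ordering are assumptions:
define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
  ; offset assumed; natural bfloat alignment forces the masked dword CAS loop
  %gep = getelementptr bfloat, ptr addrspace(3) %ptr, i32 32767
  %unused = atomicrmw fadd ptr addrspace(3) %gep, bfloat 4.0 seq_cst
  ret void
}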
@@ -5569,26 +5581,27 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5611,25 +5624,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5650,21 +5664,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5680,25 +5694,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5716,24 +5731,25 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5750,21 +5766,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5782,20 +5798,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5812,20 +5828,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5841,21 +5857,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5871,16 +5887,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5897,16 +5913,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6399,13 +6415,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6422,13 +6439,13 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6444,12 +6461,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6464,12 +6481,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6485,14 +6502,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6614,13 +6631,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6637,13 +6655,13 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6659,12 +6677,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6679,12 +6697,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6700,14 +6718,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7547,30 +7565,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7591,30 +7611,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7634,27 +7656,27 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v2
; GFX10-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7674,26 +7696,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7712,26 +7734,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_add_f32_e32 v3, v3, v2
; GFX908-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7749,29 +7771,29 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_add_f32_e32 v3, v3, v2
; GFX8-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7888,30 +7910,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7932,30 +7956,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7975,27 +8001,27 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v2
; GFX10-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8015,26 +8041,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8053,26 +8079,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_add_f32_e32 v3, v3, v2
; GFX908-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8090,29 +8116,29 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_add_f32_e32 v3, v3, v2
; GFX8-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8849,20 +8875,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-NEXT: ; %bb.5:
; GFX7-NEXT: s_lshl_b32 s0, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: ds_read_b32 v3, v1
+; GFX7-NEXT: ds_read_b32 v2, v1
; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB28_6
; GFX7-NEXT: .LBB28_7: ; %Flow21
@@ -8973,20 +8999,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-NEXT: ; %bb.5:
; GFX6-NEXT: s_lshl_b32 s0, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: ds_read_b32 v3, v1
+; GFX6-NEXT: ds_read_b32 v2, v1
; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB28_6
; GFX6-NEXT: .LBB28_7: ; %Flow19
@@ -9677,20 +9703,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: ; %bb.5:
; GFX7-NEXT: s_lshl_b32 s0, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: ds_read_b32 v3, v1
+; GFX7-NEXT: ds_read_b32 v2, v1
; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB29_6
; GFX7-NEXT: .LBB29_7: ; %Flow21
@@ -9801,20 +9827,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: ; %bb.5:
; GFX6-NEXT: s_lshl_b32 s0, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: ds_read_b32 v3, v1
+; GFX6-NEXT: ds_read_b32 v2, v1
; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB29_6
; GFX6-NEXT: .LBB29_7: ; %Flow19
@@ -10084,12 +10110,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB31_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -10105,12 +10131,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX6-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB31_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
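
Note on the pattern repeated in the hunks above: in each expanded atomicrmw compare-and-swap loop, the copy of the loop-carried value (the v_mov_b32) moves from the bottom of the loop body to the top, and the two VGPRs swap roles; the ds_cmpst_rtn_b32/ds_cmpstore_rtn_b32 compare-and-swap and the v_cmp_eq_u32 exit test are otherwise unchanged. For orientation, here is a minimal sketch of the kind of IR these CHECK lines are generated from — the function name is illustrative and the exact orderings, operand types, and alignments vary per test, so treat this as an assumption rather than the exact test input:

define void @noret_fadd_sketch(ptr addrspace(3) %ptr) {
  ; No-return atomic fadd on LDS; the AMDGPU backend expands this into the
  ; %atomicrmw.start/%atomicrmw.end compare-and-swap loop checked above.
  %old = atomicrmw fadd ptr addrspace(3) %ptr, float 4.0 seq_cst, align 4
  ret void
}
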
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
index d6b7d8f..8e094a7 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
@@ -1598,29 +1598,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -1641,29 +1641,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -1688,16 +1688,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1710,29 +1710,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1747,29 +1747,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1784,24 +1784,24 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -1823,16 +1823,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1853,16 +1853,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1884,17 +1884,17 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1915,18 +1915,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1947,18 +1947,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1989,20 +1989,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2033,21 +2034,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2073,16 +2074,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2105,20 +2106,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2143,21 +2145,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2180,17 +2182,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2213,16 +2215,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2244,16 +2246,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2276,17 +2278,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,18 +2310,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2341,18 +2343,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2685,17 +2687,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, 4.0, v2.l
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2718,18 +2721,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2749,14 +2752,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX942-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX942-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2771,17 +2774,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, 4.0, v2.l
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2798,18 +2802,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2826,16 +2830,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX10-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -2852,14 +2856,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX90A-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX90A-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2875,14 +2879,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX908-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX908-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2898,15 +2902,15 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2922,16 +2926,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2948,16 +2952,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3911,38 +3915,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3963,37 +3967,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4019,22 +4023,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4047,38 +4051,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4093,37 +4097,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4138,28 +4142,28 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4182,20 +4186,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4217,20 +4221,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4252,22 +4256,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4288,19 +4292,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4321,19 +4325,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4364,29 +4368,30 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4417,28 +4422,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4465,22 +4471,22 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4504,28 +4510,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4551,27 +4558,28 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4594,21 +4602,21 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -4632,20 +4640,20 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4668,20 +4676,20 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4704,22 +4712,22 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4741,19 +4749,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4775,19 +4783,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6440,14 +6452,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6464,13 +6476,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6486,13 +6498,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6510,16 +6522,16 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7589,31 +7601,34 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -7638,32 +7653,33 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -7686,27 +7702,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7724,30 +7740,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7768,30 +7786,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7811,27 +7831,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7851,26 +7871,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7889,26 +7909,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7926,29 +7946,29 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8047,31 +8067,34 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8096,32 +8119,33 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8144,27 +8168,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8182,30 +8206,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8226,30 +8252,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8269,27 +8297,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8309,26 +8337,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8347,26 +8375,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8384,29 +8412,29 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
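; A minimal sketch of the kind of IR the local_atomic_fmax_noret_* checks
; above are generated from (the signature is copied from the hunk headers;
; the result name and the seq_cst ordering are assumptions). The targets
; shown expand this unused-result atomicrmw into the ds_cmpst_rtn_b32 /
; ds_cmpstore_rtn_b32 compare-and-swap loops checked above; the rewritten
; loops perform the v_mov_b32 copy at the loop head and drop the copy-back
; at the loop tail, swapping the v_cmp_eq_u32 operands to match.
define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) {
  %unused = atomicrmw fmax ptr addrspace(3) %ptr, <2 x half> %val seq_cst
  ret void
}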
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
index 11ed43d..0aa8d33 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
@@ -1598,29 +1598,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -1641,29 +1641,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -1688,16 +1688,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1710,29 +1710,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1747,29 +1747,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1784,24 +1784,24 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -1823,16 +1823,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1853,16 +1853,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1884,17 +1884,17 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1915,18 +1915,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1947,18 +1947,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1989,20 +1989,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2033,21 +2034,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2073,16 +2074,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2105,20 +2106,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2143,21 +2145,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2180,17 +2182,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2213,16 +2215,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2244,16 +2246,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2276,17 +2278,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,18 +2310,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2341,18 +2343,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2685,17 +2687,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v2.l, 4.0, v2.l
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2718,18 +2721,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2749,14 +2752,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX942-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX942-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2771,17 +2774,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v2.l, 4.0, v2.l
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2798,18 +2802,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2826,16 +2830,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX10-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX10-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -2852,14 +2856,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX90A-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX90A-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2875,14 +2879,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX908-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX908-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2898,15 +2902,15 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2922,16 +2926,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2948,16 +2952,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3911,38 +3915,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3963,37 +3967,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4019,22 +4023,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4047,38 +4051,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4093,37 +4097,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4138,28 +4142,28 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4182,20 +4186,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4217,20 +4221,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4252,22 +4256,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4288,19 +4292,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4321,19 +4325,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
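
Across every target above (GFX6 through GFX12), the regenerated checks for local_atomic_fmin_noret_bf16 show the same mechanical change: the v_mov_b32 copy of the loop-carried value moves from the bottom of the compare-and-swap loop to its top, swapping the roles of two VGPRs while the mask/min/cmpswap sequence itself is unchanged. For orientation, here is a minimal sketch of the kind of IR such a test plausibly exercises; the body, ordering, and alignment are assumptions, since only the signature and the 4.0 operand are visible in the hunks.

; Hypothetical test body (not part of this diff).
define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
  ; A sub-word LDS atomic: the backend expands it into the 32-bit
  ; compare-and-swap loop whose regenerated checks appear above.
  %unused = atomicrmw fmin ptr addrspace(3) %ptr, bfloat 4.0 seq_cst, align 2
  ret void
}
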
@@ -4364,29 +4368,30 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4417,28 +4422,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4465,22 +4471,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4504,28 +4510,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4551,27 +4558,28 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4594,21 +4602,21 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -4632,20 +4640,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4668,20 +4676,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4704,22 +4712,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4741,19 +4749,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4775,19 +4783,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
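
The __offset hunks above repeat the same renaming. A comparable sketch follows; the element index is an assumption back-computed from the offset:65534 that the __offset__align4 checks below encode directly.

; Hypothetical test body (not part of this diff).
define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwind {
  %gep = getelementptr inbounds bfloat, ptr addrspace(3) %ptr, i32 32767
  %unused = atomicrmw fmin ptr addrspace(3) %gep, bfloat 4.0 seq_cst, align 2
  ret void
}
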
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
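
Because the pointer in the __offset__align4 hunks above is known to be 4-byte aligned, the expansion keeps a literal offset:65534 on the DS instruction and needs no runtime address masking. A sketch under the same assumptions as before:

; Hypothetical test body (not part of this diff).
define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr) nounwind {
  %gep = getelementptr inbounds bfloat, ptr addrspace(3) %ptr, i32 32767
  %unused = atomicrmw fmin ptr addrspace(3) %gep, bfloat 4.0 seq_cst, align 4
  ret void
}
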
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_min_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_min_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
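
local_atomic_fmin_noret_v2f16 above operates on a full dword, so no bit-field surgery is needed; the loop only canonicalizes the loaded value (the v_pk_max_f16 of a register with itself quiets any signaling NaN) before taking the packed minimum against the operand. A sketch, with the %val parameter name completed from the truncated hunk headers:

; Hypothetical test body (not part of this diff).
define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
  %unused = atomicrmw fmin ptr addrspace(3) %ptr, <2 x half> %val seq_cst
  ret void
}
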
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x half> %val) nounwind {
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6440,14 +6452,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_min_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6464,13 +6476,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6486,13 +6498,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6510,16 +6522,16 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_min_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7589,31 +7601,34 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -7638,32 +7653,33 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -7686,27 +7702,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_min_f32_e32 v3, v3, v2
; GFX942-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7724,30 +7740,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7768,30 +7786,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7811,27 +7831,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
; GFX10-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7851,26 +7871,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7889,26 +7909,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
; GFX908-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7926,29 +7946,29 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
; GFX8-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8047,31 +8067,34 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8096,32 +8119,33 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8144,27 +8168,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_min_f32_e32 v3, v3, v2
; GFX942-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8182,30 +8206,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8226,30 +8252,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8269,27 +8297,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
; GFX10-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8309,26 +8337,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8347,26 +8375,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
; GFX908-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8384,29 +8412,29 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
; GFX8-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
index d74338c..929bb61 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
@@ -453,13 +453,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -478,12 +479,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -498,13 +499,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -521,13 +523,13 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
@@ -543,12 +545,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -563,12 +565,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -584,12 +586,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -605,12 +607,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -626,12 +628,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB2_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -654,13 +656,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -679,12 +682,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB3_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -699,13 +702,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -722,13 +726,13 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB3_1
@@ -744,12 +748,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB3_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -764,12 +768,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB3_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -785,12 +789,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB3_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -806,12 +810,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB3_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -828,12 +832,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB3_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
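The f64 hunks that follow apply the identical restructuring to 64-bit register pairs; the hoisted copy becomes v_dual_mov_b32 on GFX11/GFX12, v_mov_b64_e32 on GFX942, v_pk_mov_b32 on GFX90A, and two plain v_mov_b32 on the older targets. A sketch of the presumed input, under the same assumptions as above:

  define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
    %unused = atomicrmw fsub ptr addrspace(3) %ptr, double 4.0 seq_cst  ; ordering assumed
    ret void
  }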
@@ -1296,13 +1300,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1321,12 +1326,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB6_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1341,13 +1346,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1364,14 +1370,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB6_1
@@ -1387,12 +1393,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB6_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1407,13 +1413,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB6_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1429,13 +1435,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB6_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1451,13 +1457,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB6_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1473,13 +1479,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v2, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB6_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1502,13 +1508,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1527,12 +1534,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB7_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1547,13 +1554,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1570,14 +1578,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB7_1
@@ -1593,12 +1601,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB7_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1613,13 +1621,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1635,13 +1643,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1657,13 +1665,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB7_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1680,13 +1688,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v3, v0
+; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v0, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB7_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
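The f16 hunks below are longer because LDS has no native sub-dword atomics, so the expansion performs the CAS on the containing dword: the preamble aligns the address down (v_and_b32_e32 v1, -4, v0), converts the low address bits to a bit shift (v_lshlrev_b32_e32 v0, 3, v0, then masked with 24), and builds an inverted halfword mask (v_lshlrev_b32_e64 of 0xffff followed by v_not_b32). Each iteration then extracts the halfword, applies the f16 add of -4.0, and re-inserts it under the mask (v_lshrrev/v_add_f16/v_lshlrev/v_and_or_b32); GFX6 and GFX7 additionally round-trip through f32 (v_cvt_f32_f16/v_cvt_f16_f32) since they lack f16 arithmetic. The hoisted copy simply lands at the head of this longer body. Presumed input, same caveats as above:

  define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
    %unused = atomicrmw fsub ptr addrspace(3) %ptr, half 4.0 seq_cst  ; ordering assumed
    ret void
  }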
@@ -2494,27 +2502,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2535,28 +2543,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2581,15 +2589,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2602,27 +2610,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2637,28 +2645,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2673,23 +2681,23 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2711,15 +2719,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2740,15 +2748,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2770,16 +2778,16 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2800,18 +2808,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2832,18 +2840,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2874,19 +2882,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2917,19 +2925,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2955,15 +2964,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2986,19 +2995,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3023,19 +3032,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3058,16 +3068,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -3090,15 +3100,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3120,15 +3130,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3151,16 +3161,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3182,18 +3192,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3215,18 +3225,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
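For the align-4 variant that follows, the halfword is known to occupy the low 16 bits of an aligned dword, so the dynamic shift/mask setup disappears and the loop body reduces to the f16 add plus a single mask-and-merge (v_and_or_b32 with 0xffff0000, or an s-register mask on GFX90A/GFX908/GFX942); the head-of-loop copy again replaces the old tail copy-back. Presumed input (GEP index inferred from offset:65534, i.e. 32767 halfwords x 2 bytes; ordering assumed):

  define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr) nounwind {
    %gep = getelementptr inbounds half, ptr addrspace(3) %ptr, i32 32767
    %unused = atomicrmw fsub ptr addrspace(3) %gep, half 4.0 seq_cst, align 4
    ret void
  }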
@@ -3548,16 +3558,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3580,16 +3590,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -3609,13 +3620,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3630,16 +3641,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3656,16 +3667,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3682,15 +3694,15 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -3707,13 +3719,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3729,13 +3741,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3751,14 +3763,14 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3774,16 +3786,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3800,16 +3812,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4759,38 +4771,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4811,37 +4823,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4867,22 +4879,22 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4895,38 +4907,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4941,37 +4953,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4986,28 +4998,28 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -5030,20 +5042,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5065,20 +5077,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5100,22 +5112,22 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5136,18 +5148,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5168,18 +5180,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5210,29 +5222,30 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5263,28 +5276,29 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5311,22 +5325,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5350,28 +5364,29 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5397,27 +5412,28 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5440,21 +5456,21 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -5478,20 +5494,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5514,20 +5530,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5550,22 +5566,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5587,18 +5603,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5620,18 +5636,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6031,26 +6047,27 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -6073,25 +6090,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -6112,21 +6130,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6142,25 +6160,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,24 +6197,25 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6212,21 +6232,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -6244,20 +6264,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6274,20 +6294,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6303,21 +6323,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6333,16 +6353,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6359,16 +6379,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6906,13 +6926,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6931,12 +6952,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6951,13 +6972,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6974,13 +6996,13 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6996,12 +7018,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7016,12 +7038,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7037,14 +7059,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7149,13 +7171,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -7174,12 +7197,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7194,13 +7217,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7217,13 +7241,13 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -7239,12 +7263,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7259,12 +7283,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7280,14 +7304,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8357,31 +8381,34 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8406,32 +8433,33 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8454,27 +8482,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8492,30 +8520,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8536,30 +8566,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8579,27 +8611,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -8619,26 +8651,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8657,26 +8689,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8694,29 +8726,29 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8815,31 +8847,34 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8864,32 +8899,33 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8912,27 +8948,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8950,30 +8986,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8994,30 +9032,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -9037,27 +9077,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -9077,26 +9117,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9115,26 +9155,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9152,29 +9192,29 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9489,13 +9529,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -9514,12 +9555,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB29_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9534,13 +9575,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -9557,13 +9599,13 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX10-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB29_1
@@ -9579,12 +9621,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB29_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9599,12 +9641,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB29_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9620,12 +9662,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB29_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9641,12 +9683,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB29_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9662,12 +9704,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX6-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB29_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
index 68506ce..9056d40 100644
--- a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
@@ -36,20 +36,19 @@ define amdgpu_kernel void @s_lshr_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, <
; CI-LABEL: s_lshr_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_and_b32 s0, s2, 0xffff
-; CI-NEXT: s_lshr_b32 s1, s2, 16
-; CI-NEXT: s_lshr_b32 s2, s3, 16
-; CI-NEXT: s_lshr_b32 s1, s1, s2
-; CI-NEXT: s_lshl_b32 s1, s1, 16
-; CI-NEXT: s_lshr_b32 s0, s0, s3
-; CI-NEXT: s_or_b32 s0, s0, s1
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_and_b32 s6, s4, 0xffff
+; CI-NEXT: s_lshr_b32 s4, s4, 16
+; CI-NEXT: s_lshr_b32 s7, s5, 16
+; CI-NEXT: s_lshr_b32 s4, s4, s7
+; CI-NEXT: s_lshl_b32 s4, s4, 16
+; CI-NEXT: s_lshr_b32 s5, s6, s5
+; CI-NEXT: s_or_b32 s4, s5, s4
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; GFX10-LABEL: s_lshr_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
index 680942fcb..9ecd35e 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
+++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
@@ -133,7 +133,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v42
-; CHECK-NEXT: s_mov_b32 s53, 0
+; CHECK-NEXT: s_mov_b32 s55, 0
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v45
; CHECK-NEXT: s_and_b32 exec_lo, exec_lo, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB0_25
@@ -141,7 +141,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_lshlrev_b32_e32 v43, 10, v43
; CHECK-NEXT: v_add_nc_u32_e32 v46, 0x3c05, v0
; CHECK-NEXT: v_mov_b32_e32 v47, 0
-; CHECK-NEXT: s_mov_b32 s55, 0
+; CHECK-NEXT: s_mov_b32 s53, 0
; CHECK-NEXT: .LBB0_5: ; =>This Loop Header: Depth=1
; CHECK-NEXT: ; Child Loop BB0_8 Depth 2
; CHECK-NEXT: ; Child Loop BB0_20 Depth 2
@@ -866,8 +866,8 @@ define protected amdgpu_kernel void @kernel_round1_short(ptr addrspace(1) nocapt
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v41, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v42, 10, v42
-; CHECK-NEXT: s_mov_b32 s52, 0
; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: s_mov_b32 s52, 0
; CHECK-NEXT: ds_write_b8 v46, v43 offset:15364
; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v41
; CHECK-NEXT: .LBB1_1: ; %.37
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 46b8df4..9cc0e62 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -133,35 +133,33 @@ define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16
; GCN-LABEL: i16_mad24:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_load_dword s4, s[4:5], 0xb
-; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_load_dword s6, s[4:5], 0xb
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_lshr_b32 s2, s2, 16
-; GCN-NEXT: s_mul_i32 s2, s4, s2
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i16 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_lshr_b32 s2, s4, 16
+; GCN-NEXT: s_mul_i32 s2, s6, s2
+; GCN-NEXT: s_add_i32 s2, s2, s5
+; GCN-NEXT: s_sext_i32_i16 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GFX8-LABEL: i16_mad24:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_lshr_b32 s0, s2, 16
-; GFX8-NEXT: s_mul_i32 s0, s8, s0
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_sext_i32_i16 s0, s0
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_lshr_b32 s4, s4, 16
+; GFX8-NEXT: s_mul_i32 s4, s6, s4
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
entry:
%0 = mul i16 %a, %b
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll
index ba53294..c48e25f3 100644
--- a/llvm/test/CodeGen/AMDGPU/max.ll
+++ b/llvm/test/CodeGen/AMDGPU/max.ll
@@ -155,14 +155,13 @@ define amdgpu_kernel void @s_test_imax_sge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_imax_sge_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_i32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sge_i32:
@@ -357,16 +356,15 @@ define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: s_test_imax_sgt_imm_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_max_i32 s0, s3, 9
-; SI-NEXT: s_max_i32 s1, s2, 9
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s5, s5, 9
+; SI-NEXT: s_max_i32 s4, s4, 9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sgt_imm_v2i32:
@@ -472,14 +470,13 @@ define amdgpu_kernel void @s_test_imax_sgt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_imax_sgt_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_i32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sgt_i32:
@@ -582,14 +579,13 @@ define amdgpu_kernel void @s_test_umax_uge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_umax_uge_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_u32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_uge_i32:
@@ -817,14 +813,13 @@ define amdgpu_kernel void @s_test_umax_ugt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_umax_ugt_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_u32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_ugt_i32:
@@ -858,16 +853,15 @@ define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: s_test_umax_ugt_imm_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_max_u32 s0, s3, 23
-; SI-NEXT: s_max_u32 s1, s2, 15
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s5, s5, 23
+; SI-NEXT: s_max_u32 s4, s4, 15
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_ugt_imm_v2i32:
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index ca4f5d2..43752c2 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -90,18 +90,18 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: .LBB0_13: ; %loop-memcpy-expansion2
; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT: v_mov_b32_e32 v6, s12
-; CHECK-NEXT: v_mov_b32_e32 v7, s13
+; CHECK-NEXT: v_mov_b32_e32 v6, s10
+; CHECK-NEXT: v_mov_b32_e32 v7, s11
; CHECK-NEXT: flat_load_dwordx4 v[10:13], v[6:7]
-; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s12, v8
-; CHECK-NEXT: s_add_u32 s12, s12, 16
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s10, v8
+; CHECK-NEXT: s_add_u32 s10, s10, 16
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v9, v7, vcc
-; CHECK-NEXT: s_addc_u32 s13, s13, 0
-; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[12:13], v[0:1]
-; CHECK-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CHECK-NEXT: s_addc_u32 s11, s11, 0
+; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[10:11], v[0:1]
+; CHECK-NEXT: s_or_b64 s[12:13], vcc, s[12:13]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13]
-; CHECK-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[12:13]
; CHECK-NEXT: s_cbranch_execnz .LBB0_13
; CHECK-NEXT: .LBB0_14: ; %Flow15
; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
@@ -115,8 +115,8 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.16: ; %loop-memcpy-residual4.preheader
; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
-; CHECK-NEXT: s_mov_b64 s[12:13], 0
; CHECK-NEXT: s_mov_b64 s[14:15], 0
+; CHECK-NEXT: s_mov_b64 s[12:13], 0
; CHECK-NEXT: .LBB0_17: ; %loop-memcpy-residual4
; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index 14b0729..953511d 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -10,13 +10,13 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-LABEL: memmove_p0_p0:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB0_3
@@ -33,10 +33,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_5: ; %memmove_fwd_main_loop
@@ -59,20 +59,20 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v4, v[2:3]
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -82,10 +82,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_cbranch_execnz .LBB0_8
; CHECK-NEXT: .LBB0_9: ; %Flow28
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB0_2
@@ -104,11 +104,11 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .LBB0_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v12, v[10:11]
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[4:5], v12
@@ -129,19 +129,19 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5]
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB0_15
; CHECK-NEXT: .LBB0_16: ; %Flow32
@@ -158,13 +158,13 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-LABEL: memmove_p0_p1:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB1_3
@@ -181,10 +181,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_5: ; %memmove_fwd_main_loop
@@ -207,20 +207,20 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB1_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -230,10 +230,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_cbranch_execnz .LBB1_8
; CHECK-NEXT: .LBB1_9: ; %Flow30
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB1_2
@@ -252,11 +252,11 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .LBB1_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v4, s4, v4, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[10:11], v12
@@ -277,19 +277,19 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB1_15
; CHECK-NEXT: .LBB1_16: ; %Flow34
@@ -423,17 +423,17 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align
; CHECK-NEXT: .LBB2_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ds_read_b128 v[7:10], v2
-; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5
-; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4]
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v3, v5
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2
-; CHECK-NEXT: v_mov_b32_e32 v5, v3
-; CHECK-NEXT: s_or_b32 s7, s4, s7
+; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6]
+; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10]
+; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB2_15
; CHECK-NEXT: .LBB2_16: ; %Flow36
@@ -450,13 +450,13 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-LABEL: memmove_p0_p4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB3_3
@@ -473,10 +473,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_5: ; %memmove_fwd_main_loop
@@ -499,20 +499,20 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB3_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -522,10 +522,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_cbranch_execnz .LBB3_8
; CHECK-NEXT: .LBB3_9: ; %Flow29
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB3_2
@@ -544,11 +544,11 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB3_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[4:5], v12
@@ -569,19 +569,19 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_15
; CHECK-NEXT: .LBB3_16: ; %Flow33
@@ -723,17 +723,17 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align
; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:4
; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:8
; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:12
-; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5
-; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4]
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v3, v5
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2
-; CHECK-NEXT: v_mov_b32_e32 v5, v3
-; CHECK-NEXT: s_or_b32 s7, s4, s7
+; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6]
+; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10]
+; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB4_15
; CHECK-NEXT: .LBB4_16: ; %Flow36
@@ -751,13 +751,13 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-LABEL: memmove_p1_p0:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB5_3
@@ -773,10 +773,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_5: ; %memmove_fwd_main_loop
@@ -799,20 +799,20 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB5_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v4, v[2:3]
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -822,10 +822,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_cbranch_execnz .LBB5_8
; CHECK-NEXT: .LBB5_9: ; %Flow30
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB5_2
@@ -844,11 +844,11 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .LBB5_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v12, v[10:11]
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -869,19 +869,19 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5]
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB5_15
; CHECK-NEXT: .LBB5_16: ; %Flow34
@@ -897,13 +897,13 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-LABEL: memmove_p1_p1:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB6_3
@@ -919,10 +919,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_5: ; %memmove_fwd_main_loop
@@ -945,20 +945,20 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB6_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -968,10 +968,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_cbranch_execnz .LBB6_8
; CHECK-NEXT: .LBB6_9: ; %Flow32
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB6_2
@@ -990,11 +990,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .LBB6_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -1015,19 +1015,19 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_15
; CHECK-NEXT: .LBB6_16: ; %Flow36
@@ -1109,13 +1109,13 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-LABEL: memmove_p1_p4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB8_3
@@ -1131,10 +1131,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_5: ; %memmove_fwd_main_loop
@@ -1157,20 +1157,20 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB8_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -1180,10 +1180,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_cbranch_execnz .LBB8_8
; CHECK-NEXT: .LBB8_9: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB8_2
@@ -1202,11 +1202,11 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB8_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -1227,19 +1227,19 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB8_15
; CHECK-NEXT: .LBB8_16: ; %Flow35
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 3d9c2a2..10d4eb0 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -10,46 +10,43 @@ define amdgpu_kernel void @test_smul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b
; SI-LABEL: test_smul24_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bfe_i32 s2, s2, 0x180000
-; SI-NEXT: s_bfe_i32 s3, s3, 0x180000
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bfe_i32 s2, s4, 0x180000
+; SI-NEXT: s_bfe_i32 s4, s5, 0x180000
+; SI-NEXT: s_mul_i32 s4, s2, s4
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smul24_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_bfe_i32 s0, s2, 0x180000
-; VI-NEXT: s_bfe_i32 s1, s3, 0x180000
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_bfe_i32 s4, s4, 0x180000
+; VI-NEXT: s_bfe_i32 s5, s5, 0x180000
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smul24_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000
-; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000
-; GFX9-NEXT: s_mul_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000
+; GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000
+; GFX9-NEXT: s_mul_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: test_smul24_i32:
@@ -127,16 +124,15 @@ define amdgpu_kernel void @test_smulhi24_i64(ptr addrspace(1) %out, i32 %a, i32
; GFX9-LABEL: test_smulhi24_i64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000
-; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000
-; GFX9-NEXT: s_mul_hi_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000
+; GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000
+; GFX9-NEXT: s_mul_hi_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: test_smulhi24_i64:
@@ -464,29 +460,26 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b
; SI-LABEL: test_smul24_i33:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_bfe_i32 s0, s8, 0x180000
-; SI-NEXT: s_bfe_i32 s1, s2, 0x180000
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: s_mul_i32 s0, s1, s0
-; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s1, v0
-; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_bfe_i32 s4, s4, 0x180000
+; SI-NEXT: s_bfe_i32 s5, s6, 0x180000
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: s_mul_i32 s4, s5, s4
+; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s5, v0
+; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 31
; SI-NEXT: v_ashr_i64 v[0:1], v[0:1], 31
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smul24_i33:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bfe_i32 s2, s2, 0x180000
; VI-NEXT: s_bfe_i32 s3, s4, 0x180000
@@ -494,10 +487,10 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b
; VI-NEXT: v_mul_hi_i32_i24_e32 v1, s2, v0
; VI-NEXT: v_mul_i32_i24_e32 v0, s2, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smul24_i33:
@@ -577,31 +570,29 @@ define amdgpu_kernel void @test_smulhi24_i33(ptr addrspace(1) %out, i33 %a, i33
; SI-LABEL: test_smulhi24_i33:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0
; SI-NEXT: v_and_b32_e32 v0, 1, v0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smulhi24_i33:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_and_b32_e32 v0, 1, v0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smulhi24_i33:
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index e29da3a..1165401 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -10,46 +10,43 @@ define amdgpu_kernel void @test_umul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b
; SI-LABEL: test_umul24_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xffffff
-; SI-NEXT: s_and_b32 s3, s3, 0xffffff
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_and_b32 s2, s4, 0xffffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffffff
+; SI-NEXT: s_mul_i32 s4, s2, s4
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_umul24_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_and_b32 s0, s2, 0xffffff
-; VI-NEXT: s_and_b32 s1, s3, 0xffffff
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_and_b32 s4, s4, 0xffffff
+; VI-NEXT: s_and_b32 s5, s5, 0xffffff
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_umul24_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_and_b32 s0, s2, 0xffffff
-; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff
-; GFX9-NEXT: s_mul_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff
+; GFX9-NEXT: s_mul_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
entry:
%0 = shl i32 %a, 8
@@ -406,16 +403,15 @@ define amdgpu_kernel void @test_umulhi24_i32_i64(ptr addrspace(1) %out, i32 %a,
; GFX9-LABEL: test_umulhi24_i32_i64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_and_b32 s0, s2, 0xffffff
-; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff
-; GFX9-NEXT: s_mul_hi_u32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff
+; GFX9-NEXT: s_mul_hi_u32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
entry:
%a.24 = and i32 %a, 16777215
@@ -632,33 +628,31 @@ define amdgpu_kernel void @test_umulhi16_i32(ptr addrspace(1) %out, i32 %a, i32
; SI-LABEL: test_umulhi16_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xffff
-; SI-NEXT: s_and_b32 s3, s3, 0xffff
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_and_b32 s2, s4, 0xffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffff
+; SI-NEXT: s_mul_i32 s2, s2, s4
+; SI-NEXT: s_lshr_b32 s4, s2, 16
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_umulhi16_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_and_b32 s0, s2, 0xffff
-; VI-NEXT: s_and_b32 s1, s3, 0xffff
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: s_lshr_b32 s0, s0, 16
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_and_b32 s5, s5, 0xffff
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_lshr_b32 s4, s4, 16
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_umulhi16_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll
new file mode 100644
index 0000000..5f303cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+; Check that nofpclass attributes on call returns are used in
+; SelectionDAG.
+
+define internal float @func_f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dword v0, v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile float, ptr addrspace(1) %ptr
+ ret float %ld
+}
+
+define float @call_nofpclass_funcs_f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v4, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v4, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_f32@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_f32@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v4, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v2, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, v2
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f32_e32 v0, v3, v0
+; CHECK-NEXT: v_readlane_b32 s31, v4, 1
+; CHECK-NEXT: v_readlane_b32 s30, v4, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr)
+ %min = call float @llvm.minnum.f32(float %call0, float %call1)
+ ret float %min
+}
+
+define internal <2 x float> @func_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_v2f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile <2 x float>, ptr addrspace(1) %ptr
+ ret <2 x float> %ld
+}
+
+define <2 x float> @call_nofpclass_funcs_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_v2f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_v2f32@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_v2f32@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v2, v1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v4, v0
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: v_mov_b32_e32 v0, v3
+; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f32_e32 v0, v4, v0
+; CHECK-NEXT: v_min_f32_e32 v1, v5, v1
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr)
+ %min = call <2 x float> @llvm.minnum.v2f32(<2 x float> %call0, <2 x float> %call1)
+ ret <2 x float> %min
+}
+
+define internal double @func_f64(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_f64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile double, ptr addrspace(1) %ptr
+ ret double %ld
+}
+
+define double @call_nofpclass_funcs_f64(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_f64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_f64@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_f64@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v4, v1
+; CHECK-NEXT: v_mov_b32_e32 v5, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v2, v0
+; CHECK-NEXT: v_mov_b32_e32 v3, v1
+; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_mov_b32_e32 v1, v4
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f64 v[0:1], v[2:3], v[0:1]
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr)
+ %min = call double @llvm.minnum.f64(double %call0, double %call1)
+ ret double %min
+}
+
+define float @call_nofpclass_intrinsic_f32(float %x, float %y, float %z) {
+; CHECK-LABEL: call_nofpclass_intrinsic_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_sqrt_f32_e32 v0, v0
+; CHECK-NEXT: v_sqrt_f32_e32 v1, v1
+; CHECK-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %x)
+ %call1 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %y)
+ %lt = fcmp olt float %call0, %call1
+ %min = select nsz i1 %lt, float %call0, float %call1
+ ret float %min
+}
+
+define <2 x half> @call_nofpclass_intrinsic_v2f16(float %x, float %y, float %z, float %w) {
+; CHECK-LABEL: call_nofpclass_intrinsic_v2f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v0, v0, v1
+; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v1, v2, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v0, v1
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v3, v2
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; CHECK-NEXT: s_mov_b32 s4, 0x5040100
+; CHECK-NEXT: v_perm_b32 v0, v1, v0, s4
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
+ %call1 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %z, float %w)
+ %lt = fcmp olt <2 x half> %call0, %call1
+ %min = select nsz <2 x i1> %lt, <2 x half> %call0, <2 x half> %call1
+ ret <2 x half> %min
+}
diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll
index 728067e..9afaab5 100644
--- a/llvm/test/CodeGen/AMDGPU/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/or.ll
@@ -136,27 +136,25 @@ define amdgpu_kernel void @scalar_or_i32(ptr addrspace(1) %out, i32 %a, i32 %b)
; GFX6-LABEL: scalar_or_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_or_b32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_or_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_or_b32 s0, s2, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_or_b32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
%or = or i32 %a, %b
store i32 %or, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
index 381cb8c..f098618 100644
--- a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
@@ -16,11 +16,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: undef [[S_LOAD_DWORD_IMM:%[0-9]+]].sub1:sgpr_128 = S_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0
; CHECK-NEXT: undef [[S_MOV_B32_1:%[0-9]+]].sub0:sgpr_256 = S_MOV_B32 0
; CHECK-NEXT: TENSOR_LOAD_TO_LDS_D2 [[S_MOV_B32_]], [[S_MOV_B32_1]], 0, 0, implicit-def dead $tensorcnt, implicit $exec, implicit $tensorcnt
; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
diff --git a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
index 5c90957..bcece19 100644
--- a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
+++ b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
@@ -16,11 +16,11 @@ define amdgpu_cs void @if_then(ptr addrspace(8) inreg %input, ptr addrspace(8) i
; GCN-NEXT: s_cbranch_execz .LBB0_4
; GCN-NEXT: ; %bb.3: ; %.then
; GCN-NEXT: s_or_saveexec_b32 s1, -1
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v3, s1
-; GCN-NEXT: v_mov_b32_e32 v2, 0
-; GCN-NEXT: v_mov_b32_dpp v2, v1 row_shr:1 row_mask:0xf bank_mask:0xf
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v3, s1
+; GCN-NEXT: v_mov_b32_dpp v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf
; GCN-NEXT: s_mov_b32 exec_lo, s1
-; GCN-NEXT: v_mov_b32_e32 v0, v2
+; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: v_mov_b32_e32 v4, -1
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: buffer_store_dword v4, v0, s[4:7], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
index a0bac53..e589a63 100644
--- a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @sext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a
; GCN-LABEL: sext_i16_to_i32_uniform:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_sext_i32_i16 s0, s2
-; GCN-NEXT: s_add_i32 s0, s3, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_sext_i32_i16 s4, s4
+; GCN-NEXT: s_add_i32 s4, s5, s4
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%sext = sext i16 %a to i32
%res = add i32 %b, %sext
diff --git a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
index d8511c8..17db379 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
@@ -22,63 +22,57 @@ define amdgpu_kernel void @s_shl_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, <2
; VI-LABEL: s_shl_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshr_b32 s0, s3, 16
-; VI-NEXT: s_lshr_b32 s1, s2, 16
-; VI-NEXT: s_lshl_b32 s0, s1, s0
-; VI-NEXT: s_lshl_b32 s1, s2, s3
-; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: s_and_b32 s1, s1, 0xffff
-; VI-NEXT: s_or_b32 s0, s1, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s6, s5, 16
+; VI-NEXT: s_lshr_b32 s7, s4, 16
+; VI-NEXT: s_lshl_b32 s4, s4, s5
+; VI-NEXT: s_lshl_b32 s5, s7, s6
+; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; CI-LABEL: s_shl_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_lshr_b32 s0, s2, 16
-; CI-NEXT: s_lshr_b32 s1, s3, 16
-; CI-NEXT: s_lshl_b32 s0, s0, s1
-; CI-NEXT: s_lshl_b32 s1, s2, s3
-; CI-NEXT: s_lshl_b32 s0, s0, 16
-; CI-NEXT: s_and_b32 s1, s1, 0xffff
-; CI-NEXT: s_or_b32 s0, s1, s0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_lshr_b32 s6, s4, 16
+; CI-NEXT: s_lshr_b32 s7, s5, 16
+; CI-NEXT: s_lshl_b32 s4, s4, s5
+; CI-NEXT: s_lshl_b32 s5, s6, s7
+; CI-NEXT: s_lshl_b32 s5, s5, 16
+; CI-NEXT: s_and_b32 s4, s4, 0xffff
+; CI-NEXT: s_or_b32 s4, s4, s5
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; GFX10-LABEL: s_shl_v2i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-NEXT: s_mov_b32 s7, 0x31016000
-; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_pk_lshlrev_b16 v0, s3, s2
-; GFX10-NEXT: s_mov_b32 s4, s0
-; GFX10-NEXT: s_mov_b32 s5, s1
-; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX10-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-NEXT: s_mov_b32 s2, -1
+; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: s_shl_v2i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_pk_lshlrev_b16 v0, s3, s2
-; GFX11-NEXT: s_mov_b32 s4, s0
-; GFX11-NEXT: s_mov_b32 s5, s1
-; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_endpgm
%result = shl <2 x i16> %lhs, %rhs
store <2 x i16> %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
index d4ee6fa..7c841783 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
index 1a669ad..f714935 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
index 8039e12..aa9e23b 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
index cb8bbde..ece46b5 100644
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -6,29 +6,27 @@ define amdgpu_kernel void @s_sext_i1_to_i32(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i32
@@ -78,31 +76,29 @@ define amdgpu_kernel void @s_sext_i1_to_i64(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, v0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i64
@@ -218,29 +214,27 @@ define amdgpu_kernel void @s_sext_i1_to_i16(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i16
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index 5461532..e836366 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1797,8 +1797,8 @@ define amdgpu_ps void @complex_loop(i32 inreg %cmpa, i32 %cmpb, i32 %cmpc) {
; GFX10-WAVE32-NEXT: s_cbranch_scc1 .LBB15_7
; GFX10-WAVE32-NEXT: ; %bb.1: ; %.lr.ph
; GFX10-WAVE32-NEXT: s_mov_b32 s1, exec_lo
-; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0
; GFX10-WAVE32-NEXT: s_mov_b32 s2, 0
+; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0
; GFX10-WAVE32-NEXT: s_branch .LBB15_3
; GFX10-WAVE32-NEXT: .LBB15_2: ; %latch
; GFX10-WAVE32-NEXT: ; in Loop: Header=BB15_3 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
index 4799876..76f8f48 100644
--- a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
@@ -369,42 +369,41 @@ define amdgpu_kernel void @s_abs_v4i16(ptr addrspace(1) %out, <4 x i16> %val) #0
; CI-LABEL: s_abs_v4i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_ashr_i32 s0, s3, 16
-; CI-NEXT: s_ashr_i32 s1, s2, 16
-; CI-NEXT: s_lshr_b32 s8, s2, 16
-; CI-NEXT: s_lshr_b32 s9, s3, 16
-; CI-NEXT: s_sext_i32_i16 s10, s3
-; CI-NEXT: s_sext_i32_i16 s11, s2
-; CI-NEXT: s_sub_i32 s3, 0, s3
-; CI-NEXT: s_sub_i32 s2, 0, s2
-; CI-NEXT: s_sext_i32_i16 s3, s3
-; CI-NEXT: s_sext_i32_i16 s2, s2
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_ashr_i32 s6, s5, 16
+; CI-NEXT: s_lshr_b32 s9, s5, 16
+; CI-NEXT: s_sext_i32_i16 s10, s5
+; CI-NEXT: s_sub_i32 s5, 0, s5
+; CI-NEXT: s_ashr_i32 s7, s4, 16
+; CI-NEXT: s_lshr_b32 s8, s4, 16
+; CI-NEXT: s_sext_i32_i16 s11, s4
+; CI-NEXT: s_sext_i32_i16 s5, s5
+; CI-NEXT: s_sub_i32 s4, 0, s4
; CI-NEXT: s_sub_i32 s9, 0, s9
-; CI-NEXT: s_sub_i32 s8, 0, s8
+; CI-NEXT: s_sext_i32_i16 s4, s4
; CI-NEXT: s_sext_i32_i16 s9, s9
+; CI-NEXT: s_sub_i32 s8, 0, s8
+; CI-NEXT: s_max_i32 s5, s10, s5
; CI-NEXT: s_sext_i32_i16 s8, s8
-; CI-NEXT: s_max_i32 s2, s11, s2
-; CI-NEXT: s_max_i32 s3, s10, s3
-; CI-NEXT: s_max_i32 s1, s1, s8
-; CI-NEXT: s_max_i32 s0, s0, s9
-; CI-NEXT: s_add_i32 s3, s3, 2
-; CI-NEXT: s_add_i32 s2, s2, 2
-; CI-NEXT: s_lshl_b32 s0, s0, 16
-; CI-NEXT: s_and_b32 s3, s3, 0xffff
-; CI-NEXT: s_lshl_b32 s1, s1, 16
-; CI-NEXT: s_and_b32 s2, s2, 0xffff
-; CI-NEXT: s_or_b32 s0, s0, s3
-; CI-NEXT: s_or_b32 s1, s1, s2
-; CI-NEXT: s_add_i32 s0, s0, 0x20000
-; CI-NEXT: s_add_i32 s1, s1, 0x20000
-; CI-NEXT: v_mov_b32_e32 v0, s1
-; CI-NEXT: v_mov_b32_e32 v1, s0
-; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; CI-NEXT: s_max_i32 s6, s6, s9
+; CI-NEXT: s_max_i32 s4, s11, s4
+; CI-NEXT: s_add_i32 s5, s5, 2
+; CI-NEXT: s_max_i32 s7, s7, s8
+; CI-NEXT: s_lshl_b32 s6, s6, 16
+; CI-NEXT: s_and_b32 s5, s5, 0xffff
+; CI-NEXT: s_add_i32 s4, s4, 2
+; CI-NEXT: s_or_b32 s5, s6, s5
+; CI-NEXT: s_lshl_b32 s6, s7, 16
+; CI-NEXT: s_and_b32 s4, s4, 0xffff
+; CI-NEXT: s_or_b32 s4, s6, s4
+; CI-NEXT: s_add_i32 s5, s5, 0x20000
+; CI-NEXT: s_add_i32 s4, s4, 0x20000
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_mov_b32_e32 v1, s5
+; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
%z0 = insertelement <4 x i16> poison, i16 0, i16 0
%z1 = insertelement <4 x i16> %z0, i16 0, i16 1
diff --git a/llvm/test/CodeGen/AMDGPU/sub.ll b/llvm/test/CodeGen/AMDGPU/sub.ll
index 5c113d8..0a51601 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.ll
@@ -11,14 +11,13 @@ define amdgpu_kernel void @s_sub_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; GFX6-LABEL: s_sub_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_sub_i32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_sub_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
index 6a273e5..82ef28f 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -223,44 +223,39 @@ define amdgpu_kernel void @s_test_sub_v2i16_kernarg(ptr addrspace(1) %out, <2 x
; VI-LABEL: s_test_sub_v2i16_kernarg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshr_b32 s0, s3, 16
-; VI-NEXT: s_lshr_b32 s1, s2, 16
-; VI-NEXT: s_sub_i32 s0, s1, s0
-; VI-NEXT: s_sub_i32 s1, s2, s3
-; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: s_and_b32 s1, s1, 0xffff
-; VI-NEXT: s_or_b32 s0, s1, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s6, s5, 16
+; VI-NEXT: s_lshr_b32 s7, s4, 16
+; VI-NEXT: s_sub_i32 s4, s4, s5
+; VI-NEXT: s_sub_i32 s5, s7, s6
+; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX10-LABEL: s_test_sub_v2i16_kernarg:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-NEXT: s_mov_b32 s7, 0x31016000
-; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_pk_sub_i16 v0, s2, s3
-; GFX10-NEXT: s_mov_b32 s4, s0
-; GFX10-NEXT: s_mov_b32 s5, s1
-; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX10-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-NEXT: s_mov_b32 s2, -1
+; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_sub_v2i16_kernarg:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_pk_sub_i16 v0, s2, s3
-; GFX11-NEXT: s_mov_b32 s4, s0
-; GFX11-NEXT: s_mov_b32 s5, s1
-; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_endpgm
%add = sub <2 x i16> %a, %b
store <2 x i16> %add, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index 063c56f..1f93bf7 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -189,67 +189,65 @@ define amdgpu_kernel void @s_udiv_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; SI-LABEL: s_udiv_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_u32_e32 v0, s3
-; SI-NEXT: s_sub_i32 s4, 0, s3
-; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s2, 0, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; SI-NEXT: v_mul_lo_u32 v1, s4, v0
-; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: v_mul_lo_u32 v1, s2, v0
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mul_hi_u32 v1, v0, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; SI-NEXT: v_mul_hi_u32 v0, s2, v0
-; SI-NEXT: v_readfirstlane_b32 s0, v0
-; SI-NEXT: s_mul_i32 s0, s0, s3
-; SI-NEXT: s_sub_i32 s0, s2, s0
-; SI-NEXT: s_sub_i32 s1, s0, s3
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; SI-NEXT: s_cmp_ge_u32 s0, s3
+; SI-NEXT: s_cmp_ge_u32 s4, s5
; SI-NEXT: s_cselect_b64 vcc, -1, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT: s_cselect_b32 s0, s1, s0
+; SI-NEXT: s_cselect_b32 s4, s6, s4
; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; SI-NEXT: s_cmp_ge_u32 s0, s3
+; SI-NEXT: s_cmp_ge_u32 s4, s5
; SI-NEXT: s_cselect_b64 vcc, -1, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_udiv_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
-; VI-NEXT: s_sub_i32 s4, 0, s3
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; VI-NEXT: s_sub_i32 s2, 0, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; VI-NEXT: v_mul_lo_u32 v1, s4, v0
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: v_mul_lo_u32 v1, s2, v0
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mul_hi_u32 v1, v0, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
-; VI-NEXT: v_mul_hi_u32 v0, s2, v0
-; VI-NEXT: v_readfirstlane_b32 s0, v0
-; VI-NEXT: s_mul_i32 s0, s0, s3
-; VI-NEXT: s_sub_i32 s0, s2, s0
-; VI-NEXT: s_sub_i32 s1, s0, s3
+; VI-NEXT: v_mul_hi_u32 v0, s4, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s5
+; VI-NEXT: s_sub_i32 s4, s4, s6
+; VI-NEXT: s_sub_i32 s6, s4, s5
; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT: s_cmp_ge_u32 s0, s3
+; VI-NEXT: s_cmp_ge_u32 s4, s5
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT: s_cselect_b32 s0, s1, s0
+; VI-NEXT: s_cselect_b32 s4, s6, s4
; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT: s_cmp_ge_u32 s0, s3
+; VI-NEXT: s_cmp_ge_u32 s4, s5
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GCN-LABEL: s_udiv_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 775483c..1c50f93 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -716,8 +716,6 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s3, s3, 0xffff
@@ -729,25 +727,23 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: v_mul_f32_e32 v2, v1, v2
; GCN-NEXT: v_trunc_f32_e32 v2, v2
+; GCN-NEXT: v_cvt_u32_f32_e32 v4, v2
; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
-; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc
; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GCN-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_udiv24_i48:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_and_b32 s3, s3, 0xffff
@@ -759,17 +755,17 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, s2
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT: s_mov_b32 s4, s0
-; GCN-IR-NEXT: s_mov_b32 s5, s1
+; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s2, -1
; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2
; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT: v_cvt_u32_f32_e32 v4, v2
; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT: v_cvt_u32_f32_e32 v2, v2
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc
; GCN-IR-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GCN-IR-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4
-; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4
+; GCN-IR-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-IR-NEXT: s_endpgm
%1 = lshr i48 %x, 24
%2 = lshr i48 %y, 24
diff --git a/llvm/test/CodeGen/AMDGPU/while-break.ll b/llvm/test/CodeGen/AMDGPU/while-break.ll
index 19c8e84..2b7e283 100644
--- a/llvm/test/CodeGen/AMDGPU/while-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/while-break.ll
@@ -157,8 +157,8 @@ define amdgpu_ps < 2 x float> @while_break_two_chains_of_phi(float %v, i32 %x, i
; GCN-LABEL: while_break_two_chains_of_phi:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b32_e32 v6, 0
-; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_branch .LBB2_2
; GCN-NEXT: .LBB2_1: ; %Flow1
; GCN-NEXT: ; in Loop: Header=BB2_2 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll
index feb6ecd..92280b9 100644
--- a/llvm/test/CodeGen/AMDGPU/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor.ll
@@ -298,14 +298,13 @@ define amdgpu_kernel void @scalar_xor_i32(ptr addrspace(1) %out, i32 %a, i32 %b)
; SI-LABEL: scalar_xor_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_xor_b32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_xor_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: scalar_xor_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
index c393582..d9f5ba9 100644
--- a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @zext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a
; GCN-LABEL: zext_i16_to_i32_uniform:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_and_b32 s0, s2, 0xffff
-; GCN-NEXT: s_add_i32 s0, s3, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_add_i32 s4, s5, s4
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%zext = zext i16 %a to i32
%res = add i32 %b, %zext
diff --git a/llvm/test/CodeGen/ARM/llvm.sincospi.ll b/llvm/test/CodeGen/ARM/llvm.sincospi.ll
new file mode 100644
index 0000000..91bf0aa
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/llvm.sincospi.ll
@@ -0,0 +1,249 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv7-apple-ios7.0.0 < %s | FileCheck %s
+
+define { half, half } @test_sincospi_f16(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r4, pc}
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_sin:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_cos:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: ldr r1, [sp, #4]
+; CHECK-NEXT: strh.w r0, [sp, #22]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: strh.w r0, [sp, #20]
+; CHECK-NEXT: add r0, sp, #20
+; CHECK-NEXT: vld1.32 {d8[0]}, [r0:32]
+; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: ldr r1, [sp]
+; CHECK-NEXT: strh.w r0, [sp, #18]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: strh.w r0, [sp, #16]
+; CHECK-NEXT: add r0, sp, #16
+; CHECK-NEXT: vmovl.u16 q9, d8
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vmov.32 r0, d18[0]
+; CHECK-NEXT: vmov.32 r1, d18[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: vmov.32 r3, d16[1]
+; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+ %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_sincospi_f32(float %a) #0 {
+; CHECK-LABEL: test_sincospi_f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldrd r1, r0, [sp], #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: vldr s1, [sp, #4]
+; CHECK-NEXT: vldr s3, [sp]
+; CHECK-NEXT: vldr s0, [sp, #12]
+; CHECK-NEXT: vldr s2, [sp, #8]
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v3f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r6, r2
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: mov r0, r7
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #36]
+; CHECK-NEXT: vmov d0, r7, r6
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: add.w r2, r4, #16
+; CHECK-NEXT: vmov d1, r5, r0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vldr s1, [sp, #8]
+; CHECK-NEXT: vldr s3, [sp, #12]
+; CHECK-NEXT: vldr s2, [sp, #4]
+; CHECK-NEXT: vldr s0, [sp]
+; CHECK-NEXT: vst1.32 {d1}, [r1:64]!
+; CHECK-NEXT: vst1.32 {d0}, [r2:64]!
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { double, double } @test_sincospi_f64(double %a) #0 {
+; CHECK-LABEL: test_sincospi_f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r7, lr}
+; CHECK-NEXT: add r7, sp, #4
+; CHECK-NEXT: sub sp, #20
+; CHECK-NEXT: mov r4, sp
+; CHECK-NEXT: bfc r4, #0, #3
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r3, sp
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: subs r4, r7, #4
+; CHECK-NEXT: ldrd r0, r1, [sp, #8]
+; CHECK-NEXT: ldrd r2, r3, [sp]
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: pop {r4, r7, pc}
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT: add r7, sp, #16
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: mov r4, sp
+; CHECK-NEXT: bfc r4, #0, #3
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: mov r6, r1
+; CHECK-NEXT: ldr r1, [r7, #8]
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r8, r2
+; CHECK-NEXT: add r2, sp, #24
+; CHECK-NEXT: add r3, sp, #16
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r3, sp
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r1, r8
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: vldr d19, [sp, #24]
+; CHECK-NEXT: vldr d18, [sp, #8]
+; CHECK-NEXT: vldr d17, [sp, #16]
+; CHECK-NEXT: vldr d16, [sp]
+; CHECK-NEXT: vst1.32 {d18, d19}, [r4]!
+; CHECK-NEXT: vst1.32 {d16, d17}, [r4]
+; CHECK-NEXT: sub.w r4, r7, #16
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
+ %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+attributes #0 = { nounwind }
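
The checks above all funnel through an out-parameter libcall: the scalar value travels in the usual argument registers, and the next two arguments are stack slots that receive the two results, which the lowering then reloads. A minimal C sketch of that convention follows; the signatures are inferred from the register usage in the checks (value in r0/s0, result pointers in the next argument slots), not taken from a public header.

    /* Sketch only: __sincospif/__sincospi compute sin(pi*a) and
       cos(pi*a) through out-pointers. The first pointer receives the
       sine: the checks reload result 0 from the slot passed in r1. */
    extern void __sincospif(float a, float *sinp, float *cosp);
    extern void __sincospi(double a, double *sinp, double *cosp);

    typedef struct { float sin_pi_a, cos_pi_a; } sincospi_f32;

    /* What `call { float, float } @llvm.sincospi.f32(float %a)` lowers
       to here: two stack slots, one libcall, two reloads. */
    static sincospi_f32 lower_sincospi_f32(float a) {
      sincospi_f32 r;
      __sincospif(a, &r.sin_pi_a, &r.cos_pi_a);
      return r;
    }
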
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll
new file mode 100644
index 0000000..22fba8c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll
@@ -0,0 +1,59 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for indexed types in constant-indexed arrays in cbuffers.
+;
+; struct S {
+; float x[2];
+; uint q;
+; };
+; cbuffer CB : register(b0) {
+; uint32_t3 w[3]; // offset 0, size 12 (+4) * 3
+; S v[3]; // offset 48, size 24 (+8) * 3
+; }
+%S = type <{ <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, i32 }>
+%__cblayout_CB = type <{
+ <{
+ [2 x <{ <3 x i32>, target("dx.Padding", 4) }>],
+ <3 x i32>
+ }>,
+ target("dx.Padding", 4),
+ <{
+ [2 x <{ %S, target("dx.Padding", 8) }>], %S
+ }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; w[2].z
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: store i32 [[X]], ptr %dst
+ %w_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %w_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %w_ptr, i32 40
+ %w_load = load i32, ptr addrspace(2) %w_gep, align 4
+ store i32 %w_load, ptr %dst, align 4
+
+ ;; v[2].q
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 8)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %v_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %v_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %v_ptr, i32 84
+ %v_load = load i32, ptr addrspace(2) %v_gep, align 4
+ %v.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %v_load, ptr %v.i, align 4
+
+ ret void
+}
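
The constant row and component indices in the checks follow directly from 16-byte cbuffer rows: a scalar at byte offset off lands in row off/16, at component (off % 16)/sizeof(elem) of the loaded row struct. A small C sketch reproducing the two accesses above; the offsets are specific to this test's layout, not a general rule.

    #include <assert.h>

    /* 16-byte cbuffer rows: map a byte offset to a row index and a
       component index within the loaded row struct. */
    static void row_and_component(unsigned off, unsigned elem_size,
                                  unsigned *row, unsigned *comp) {
      *row = off / 16;
      *comp = (off % 16) / elem_size;
    }

    int main(void) {
      unsigned row, comp;
      row_and_component(40, 4, &row, &comp);      /* w[2].z: 2*16 + 8 */
      assert(row == 2 && comp == 2);              /* row 2, index 2   */
      row_and_component(48 + 84, 4, &row, &comp); /* v[2].q           */
      assert(row == 8 && comp == 1);              /* row 8, index 1   */
      return 0;
    }
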
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll
new file mode 100644
index 0000000..615fc5e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll
@@ -0,0 +1,49 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Test for when we have indices into both the array and the vector, i.e., s[1][3]
+
+; cbuffer CB : register(b0) {
+; uint4 s[3]; // offset 0, size 16 * 3
+; }
+%__cblayout_CB = type <{ [3 x <4 x i32>] }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; s[1][3]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: store i32 [[X]], ptr %dst
+ %i8_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %i8_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %i8_ptr, i32 28
+ %i8_vecext = load i32, ptr addrspace(2) %i8_gep, align 4
+ store i32 %i8_vecext, ptr %dst, align 4
+
+ ;; s[2].w
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ;;
+ ;; It would be nice to avoid the redundant vector creation here, but that's
+ ;; outside of the scope of this pass.
+ ;;
+ ; CHECK: [[X_VEC:%.*]] = insertelement <4 x i32> {{%.*}}, i32 [[X]], i32 3
+ ; CHECK: [[X_EXT:%.*]] = extractelement <4 x i32> [[X_VEC]], i32 3
+ ; CHECK: store i32 [[X_EXT]], ptr %dst
+ %typed_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %typed_gep = getelementptr <4 x i32>, ptr addrspace(2) %typed_ptr, i32 2
+ %typed_load = load <4 x i32>, ptr addrspace(2) %typed_gep, align 16
+ %typed_vecext = extractelement <4 x i32> %typed_load, i32 3
+ store i32 %typed_vecext, ptr %dst, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll
new file mode 100644
index 0000000..eabc07c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB : register(b0) {
+; float a1[3];
+; }
+%__cblayout_CB = type <{ [2 x <{ float, [12 x i8] }>], float }>
+
+@CB.cb = global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h = call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ;; a1[1]
+ ;; Note that the valid GEPs of a1 are `0, 0, 0`, `0, 0, 1`, and `0, 1`.
+ ;
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[X]], ptr %dst
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1_gep = getelementptr inbounds <{ [2 x <{ float, [12 x i8] }>], float }>, ptr addrspace(2) %a1_ptr, i32 0, i32 0, i32 1
+ %a1 = load float, ptr addrspace(2) %a1_gep, align 4
+ store float %a1, ptr %dst, align 32
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll
new file mode 100644
index 0000000..6f6166e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll
@@ -0,0 +1,145 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB : register(b0) {
+; float a1[3]; // offset 0, size 4 (+12) * 3
+; double3 a2[2]; // offset 48, size 24 (+8) * 2
+; float16_t a3[2][2]; // offset 112, size 2 (+14) * 4
+; uint64_t a4[3]; // offset 176, size 8 (+8) * 3
+; int4 a5[2][3][4]; // offset 224, size 16 * 24
+; uint16_t a6[1]; // offset 608, size 2 (+14) * 1
+; int64_t a7[2]; // offset 624, size 8 (+8) * 2
+; bool a8[4]; // offset 656, size 4 (+12) * 4
+; }
+%__cblayout_CB = type <{
+ <{ [2 x <{ float, target("dx.Padding", 12) }>], float }>, target("dx.Padding", 12),
+ <{ [1 x <{ <3 x double>, target("dx.Padding", 8) }>], <3 x double> }>, target("dx.Padding", 8),
+ <{ [3 x <{ half, target("dx.Padding", 14) }>], half }>, target("dx.Padding", 14),
+ <{ [2 x <{ i64, target("dx.Padding", 8) }>], i64 }>, target("dx.Padding", 8),
+ [24 x <4 x i32>],
+ [1 x i16], target("dx.Padding", 14),
+ <{ [1 x <{ i64, target("dx.Padding", 8) }>], i64 }>, target("dx.Padding", 8),
+ <{ [3 x <{ i32, target("dx.Padding", 12) }>], i32 }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; a1[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[X]], ptr %dst
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a1_ptr, i32 16
+ %a1 = load float, ptr addrspace(2) %a1_gep, align 4
+ store float %a1, ptr %dst, align 32
+
+ ;; a2[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 5)
+ ; CHECK: [[X:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { double, double } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 6)
+ ; CHECK: [[Z:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x double> poison, double [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x double> [[VEC0]], double [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x double> [[VEC1]], double [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ ; CHECK: store <3 x double> [[VEC2]], ptr [[PTR]]
+ %a2_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %a2_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a2_ptr, i32 32
+ %a2 = load <3 x double>, ptr addrspace(2) %a2_gep, align 8
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ store <3 x double> %a2, ptr %a2.i, align 32
+
+ ;; a3[0][1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 8)
+ ; CHECK: [[X:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 32
+ ; CHECK: store half [[X]], ptr [[PTR]]
+ %a3_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 112)
+ %a3_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a3_ptr, i32 16
+ %a3 = load half, ptr addrspace(2) %a3_gep, align 2
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 32
+ store half %a3, ptr %a3.i, align 2
+
+ ;; a4[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 12)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ ; CHECK: store i64 [[X]], ptr [[PTR]]
+ %a4_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 176)
+ %a4_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a4_ptr, i32 16
+ %a4 = load i64, ptr addrspace(2) %a4_gep, align 8
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ store i64 %a4, ptr %a4.i, align 8
+
+ ;; a5[1][0][0]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 26)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[A:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: [[VEC0:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC0]], i32 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <4 x i32> [[VEC1]], i32 [[Z]], i32 2
+ ; CHECK: [[VEC3:%.*]] = insertelement <4 x i32> [[VEC2]], i32 [[A]], i32 3
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ ; CHECK: store <4 x i32> [[VEC3]], ptr [[PTR]]
+ %a5_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 224)
+ %a5_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a5_ptr, i32 192
+ %a5 = load <4 x i32>, ptr addrspace(2) %a5_gep, align 4
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ store <4 x i32> %a5, ptr %a5.i, align 4
+
+ ;; a6[0]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 38)
+ ; CHECK: [[X:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 64
+ ; CHECK: store i16 [[X]], ptr [[PTR]]
+ %a6_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 608)
+ %a6 = load i16, ptr addrspace(2) %a6_ptr, align 2
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 64
+ store i16 %a6, ptr %a6.i, align 2
+
+ ;; a7[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 40)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ ; CHECK: store i64 [[X]], ptr [[PTR]]
+ %a7_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 624)
+ %a7_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a7_ptr, i32 16
+ %a7 = load i64, ptr addrspace(2) %a7_gep, align 8
+ %a7.i = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ store i64 %a7, ptr %a7.i, align 8
+
+ ;; a8[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 42)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 80
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %a8_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 656)
+ %a8_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a8_ptr, i32 16
+ %a8 = load i32, ptr addrspace(2) %a8_gep, align 4, !range !0, !noundef !1
+ %a8.i = getelementptr inbounds nuw i8, ptr %dst, i32 80
+ store i32 %a8, ptr %a8.i, align 4
+
+ ret void
+}
+
+!0 = !{i32 0, i32 2}
+!1 = !{}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll
new file mode 100644
index 0000000..22994cf
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll
@@ -0,0 +1,64 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for indexed types in dynamically indexed arrays in cbuffers.
+;
+; Bug https://github.com/llvm/llvm-project/issues/164517
+; XFAIL: *
+;
+; struct S {
+; float x[2];
+; uint q;
+; };
+; cbuffer CB : register(b0) {
+; uint32_t3 w[3]; // offset 0, size 12 (+4) * 3
+; S v[3]; // offset 48, size 24 (+8) * 3
+; }
+%S = type <{ <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, i32 }>
+%__cblayout_CB = type <{
+ <{
+ [2 x <{ <3 x i32>, target("dx.Padding", 4) }>],
+ <3 x i32>
+ }>,
+ target("dx.Padding", 4),
+ <{
+ [2 x <{ %S, target("dx.Padding", 8) }>], %S
+ }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; w[idx].z
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: store i32 [[X]], ptr %dst
+ %w_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %w_arrayidx = getelementptr <3 x i32>, ptr addrspace(2) %w_ptr, i32 %idx
+ %w_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %w_arrayidx, i32 4
+ %w_load = load i32, ptr addrspace(2) %w_gep, align 4
+ store i32 %w_load, ptr %dst, align 4
+
+ ;; v[idx].q
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %v_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %v_arrayidx = getelementptr <{ %S, target("dx.Padding", 4) }>, ptr addrspace(2) %v_ptr, i32 %idx
+ %v_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %v_arrayidx, i32 8
+ %v_load = load i32, ptr addrspace(2) %v_gep, align 4
+ %v.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %v_load, ptr %v.i, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll
new file mode 100644
index 0000000..7daebaed
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll
@@ -0,0 +1,46 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for dynamic indices into arrays in cbuffers.
+
+; cbuffer CB : register(b0) {
+; uint s[10]; // offset 0, size 4 (+12) * 10
+; uint t[12]; // offset 160, size 4 (+12) * 12
+; }
+%__cblayout_CB = type <{ <{ [9 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12), <{ [11 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; s[idx]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: store i32 [[X]], ptr %dst
+ %s_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %s_gep = getelementptr <{ i32, target("dx.Padding", 12) }>, ptr addrspace(2) %s_ptr, i32 %idx
+ %s_load = load i32, ptr addrspace(2) %s_gep, align 4
+ store i32 %s_load, ptr %dst, align 4
+
+ ;; t[idx]
+ ;
+ ; CHECK: [[T_IDX:%.*]] = add i32 10, %idx
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 [[T_IDX]])
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %t_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 160)
+ %t_gep = getelementptr <{ i32, target("dx.Padding", 12) }>, ptr addrspace(2) %t_ptr, i32 %idx
+ %t_load = load i32, ptr addrspace(2) %t_gep, align 4
+ %t.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %t_load, ptr %t.i, align 4
+
+ ret void
+}
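
Because each padded array element here occupies exactly one 16-byte row, a dynamic index adds straight onto the base row; that is where the `add i32 10, %idx` in the t[idx] check comes from (t starts at byte 160, i.e. row 10). A one-line C sketch of the mapping, using this test's offsets:

    /* Sketch: with a 16-byte element stride, element idx of an array
       based at byte base_off sits in row base_off/16 + idx. */
    static unsigned dynamic_row(unsigned base_off, unsigned idx) {
      return base_off / 16 + idx;  /* s: 0/16 + idx; t: 160/16 + idx */
    }
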
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll
new file mode 100644
index 0000000..65c9a3e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll
@@ -0,0 +1,101 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB {
+; float a1; // offset 0, size 4
+; int a2; // offset 4, size 4
+; bool a3; // offset 8, size 4
+; float16_t a4; // offset 12, size 2
+; uint16_t a5; // offset 14, size 2
+; double a6; // offset 16, size 8
+; int64_t a7; // offset 24, size 8
+; }
+%__cblayout_CB = type <{ float, i32, i32, half, i16, double, i64 }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+
+ ;; a1
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A1:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[A1]], ptr %dst
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1 = load float, ptr addrspace(2) %a1_ptr, align 4
+ store float %a1, ptr %dst, align 8
+
+ ;; a2
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A2:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[A2]], ptr [[PTR]]
+ %a2_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 4)
+ %a2 = load i32, ptr addrspace(2) %a2_ptr, align 4
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %a2, ptr %a2.i, align 8
+
+ ;; a3
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A3:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ ; CHECK: store i32 [[A3]], ptr [[PTR]]
+ %a3_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 8)
+ %a3 = load i32, ptr addrspace(2) %a3_ptr, align 4
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ store i32 %a3, ptr %a3.i, align 4
+
+ ;; a4
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A4:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 6
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 12
+ ; CHECK: store half [[A4]], ptr [[PTR]]
+ %a4_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 12)
+ %a4 = load half, ptr addrspace(2) %a4_ptr, align 2
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 12
+ store half %a4, ptr %a4.i, align 4
+
+ ;; a5
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A5:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 7
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 14
+ ; CHECK: store i16 [[A5]], ptr [[PTR]]
+ %a5_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 14)
+ %a5 = load i16, ptr addrspace(2) %a5_ptr, align 2
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 14
+ store i16 %a5, ptr %a5.i, align 2
+
+ ;; a6
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[A6:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ ; CHECK: store double [[A6]], ptr [[PTR]]
+ %a6_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 16)
+ %a6 = load double, ptr addrspace(2) %a6_ptr, align 8
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ store double %a6, ptr %a6.i, align 8
+
+ ;; a7
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[A7:%.*]] = extractvalue { i64, i64 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 24
+ ; CHECK: store i64 [[A7]], ptr [[PTR]]
+ %a7_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 24)
+ %a7 = load i64, ptr addrspace(2) %a7_ptr, align 8
+ %a7.i = getelementptr inbounds nuw i8, ptr %dst, i32 24
+ store i64 %a7, ptr %a7.i, align 8
+
+ ret void
+}
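
The row struct in each check is picked by element size: rows are 16 bytes, so the pass loads 16/sizeof(elem) components per row (cbufferrow.8 for half/i16, cbufferrow.4 for float/i32, cbufferrow.2 for double/i64) and extracts component (off % 16)/sizeof(elem). A C sketch checked against two of the cases above; the offsets are this test's.

    #include <assert.h>

    /* Sketch: 16-byte rows, so component count and component index
       both follow from the element size. */
    static unsigned components_per_row(unsigned elem_size) {
      return 16 / elem_size;  /* 8 for half/i16, 4 for i32, 2 for i64 */
    }

    int main(void) {
      /* a4: half at byte 12 -> row 0, component 6 of cbufferrow.8 */
      assert(components_per_row(2) == 8 && (12 % 16) / 2 == 6);
      /* a7: i64 at byte 24 -> row 1, component 1 of cbufferrow.2 */
      assert(components_per_row(8) == 2 && 24 / 16 == 1
             && (24 % 16) / 8 == 1);
      return 0;
    }
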
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll
new file mode 100644
index 0000000..0156a1a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll
@@ -0,0 +1,121 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB {
+; float3 a1; // offset 0, size 12 (+4)
+; double3 a2; // offset 16, size 24
+; float16_t2 a3; // offset 40, size 4 (+4)
+; uint64_t3 a4; // offset 48, size 24 (+8)
+; int4 a5; // offset 80, size 16
+; uint16_t3 a6; // offset 96, size 6
+; };
+%__cblayout_CB = type <{ <3 x float>, target("dx.Padding", 4), <3 x double>, <2 x half>, target("dx.Padding", 4), <3 x i64>, target("dx.Padding", 8), <4 x i32>, <3 x i16> }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+
+ ;; a1
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 2
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x float> poison, float [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x float> [[VEC0]], float [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x float> [[VEC1]], float [[Z]], i32 2
+ ; CHECK: store <3 x float> [[VEC2]], ptr %dst
+ %a1_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1 = load <3 x float>, ptr addrspace(2) %a1_gep, align 16
+ store <3 x float> %a1, ptr %dst, align 4
+
+ ;; a2
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { double, double } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[Z:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x double> poison, double [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x double> [[VEC0]], double [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x double> [[VEC1]], double [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ ; CHECK: store <3 x double> [[VEC2]], ptr [[PTR]]
+ %a2_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 16)
+ %a2 = load <3 x double>, ptr addrspace(2) %a2_gep, align 32
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ store <3 x double> %a2, ptr %a2.i, align 8
+
+ ;; a3
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 4
+ ; CHECK: [[Y:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 5
+ ; CHECK: [[VEC0:%.*]] = insertelement <2 x half> poison, half [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <2 x half> [[VEC0]], half [[Y]], i32 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ ; CHECK: store <2 x half> [[VEC1]], ptr [[PTR]]
+ %a3_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 40)
+ %a3 = load <2 x half>, ptr addrspace(2) %a3_gep, align 4
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ store <2 x half> %a3, ptr %a3.i, align 2
+
+ ;; a4
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 3)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i64, i64 } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 4)
+ ; CHECK: [[Z:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x i64> poison, i64 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x i64> [[VEC0]], i64 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x i64> [[VEC1]], i64 [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ ; CHECK: store <3 x i64> [[VEC2]], ptr [[PTR]]
+ %a4_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %a4 = load <3 x i64>, ptr addrspace(2) %a4_gep, align 32
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ store <3 x i64> %a4, ptr %a4.i, align 8
+
+ ;; a5
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 5)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[A:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: [[VEC0:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC0]], i32 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <4 x i32> [[VEC1]], i32 [[Z]], i32 2
+ ; CHECK: [[VEC3:%.*]] = insertelement <4 x i32> [[VEC2]], i32 [[A]], i32 3
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ ; CHECK: store <4 x i32> [[VEC3]], ptr [[PTR]]
+ %a5_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 80)
+ %a5 = load <4 x i32>, ptr addrspace(2) %a5_gep, align 16
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ store <4 x i32> %a5, ptr %a5.i, align 4
+
+ ;; a6
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 6)
+ ; CHECK: [[X:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 2
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x i16> poison, i16 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x i16> [[VEC0]], i16 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x i16> [[VEC1]], i16 [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 88
+ ; CHECK: store <3 x i16> [[VEC2]], ptr [[PTR]]
+ %a6_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 96)
+ %a6 = load <3 x i16>, ptr addrspace(2) %a6_gep, align 8
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 88
+ store <3 x i16> %a6, ptr %a6.i, align 2
+
+ ret void
+}
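The CHECK lines above pin down the cbuffer lowering pattern this test exercises: each `getpointer` plus vector load from the cbuffer becomes one `llvm.dx.resource.load.cbufferrow.*` call per 16-byte row (the numeric suffix is the element count of the returned row; the trailing type-mangling suffix is elided by `{{.*}}`), followed by extractvalue/insertelement sequences that rebuild the loaded vector. A minimal sketch of the same expansion, assuming a hypothetical layout with a single float at byte offset 16 (row 1, element 0):

;; Before lowering (sketch): a direct pointer-based load from the cbuffer.
%p = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %cb, i32 16)
%v = load float, ptr addrspace(2) %p
;; After lowering (sketch): load the whole 16-byte row, then extract element 0.
%row = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4(target("dx.CBuffer", %__cblayout_CB) %cb, i32 1)
%v.new = extractvalue { float, float, float, float } %row, 0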
diff --git a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
index 620b2ac..1c1965d 100644
--- a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
+++ b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=hexagon -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -mtriple=hexagon < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon --mattr=+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp < %s | FileCheck %s
; CHECK-LABEL: test1:
; CHECK: {{call my_instrprof_handler|r0 = #999}}
@@ -14,7 +14,4 @@ entry:
}
; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
-declare void @llvm.hexagon.instrprof.custom(ptr, i32) #1
-
-attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp,+hmxv68" }
-attributes #1 = { inaccessiblememonly nofree nosync nounwind willreturn }
+declare void @llvm.hexagon.instrprof.custom(ptr, i32)
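This change folds the HVX subtarget features into the RUN line rather than carrying them as an IR function attribute. Both spellings enable the same features; a minimal sketch of the equivalence (the function @f is hypothetical):

; 1) Globally, on the llc command line:
;      llc -mtriple=hexagon --mattr=+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp < f.ll
; 2) Per function, via a "target-features" attribute in the IR itself:
define void @f() #0 {
  ret void
}
attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp" }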
diff --git a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll b/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll
deleted file mode 100644
index 9dd402d..0000000
--- a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; REQUIRES: have_tflite
-; REQUIRES: x86_64-linux
-;
-; Check that we correctly log the currently-in-development features, both with the
-; default policy and with a learned policy.
-;
-; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \
-; RUN: -regalloc-enable-advisor=development \
-; RUN: -regalloc-training-log=%t1 \
-; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll
-; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t1 > %t1.readable
-; RUN: FileCheck --input-file %t1.readable %s
-
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel
-; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
-; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \
-; RUN: -regalloc-enable-advisor=development \
-; RUN: -regalloc-training-log=%t2 -regalloc-model=%t \
-; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll
-; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t2 > %t2.readable
-; RUN: FileCheck --input-file %t2.readable %s
-
-; CHECK-NOT: nan
-; Check the first five opcodes in the first eviction problem
-; Also, the first eviction problem spans significantly fewer than 300 instructions. Check
-; that there is a zero value.
-; Note: we're regex-ing some of the opcodes to avoid test flakiness.
-; CHECK: instructions: 20,{{([0-9]{4})}},{{([0-9]{4})}},{{([0-9]{4})}},{{.*}},0,
-; Only the candidate virtreg and the 10th LR are included in this problem. Make
-; sure the other LRs have values of zero. There are 2700 0s followed by some 1s.
-; There's a limit to how many repetitions can be matched.
-; CHECK: instructions_mapping: {{(((0,){27}){100})}}
-; CHECK-SAME: 1
-; Indexing 300 back from where the candidate vr actually resides, because not all
-; the values between the 10th LR and the candidate are zero.
-; CHECK-SAME-COUNT-6600: 0,
-; CHECK-SAME: 1
-; Ensure that we can still go through the mapping matrices for the rest of the
-; eviction problems to make sure we haven't hit the end of the matrix above.
-; There are a total of 23 eviction problems with this test.
-; CHECK-LABEL: observation: 16
-; Make sure that we're exporting the mbb_frequencies. Don't actually check the
-; values, since they are floating point and liable to change very easily.
-; CHECK: mbb_frequencies:
-; Make sure that we have the mbb_mapping feature, and that the first couple
-; of values are correct.
-; CHECK: mbb_mapping: 0,0,0,0,1,1,1
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
index aaf81ff..5b4e91c 100644
--- a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
@@ -26,30 +26,6 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128(ppc_fp128 %a) {
ret { ppc_fp128, ppc_fp128 } %result
}
-define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) {
-; CHECK-LABEL: test_sincospi_ppcf128:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: addi r5, r1, 48
-; CHECK-NEXT: addi r6, r1, 32
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: lfd f1, 48(r1)
-; CHECK-NEXT: lfd f2, 56(r1)
-; CHECK-NEXT: lfd f3, 32(r1)
-; CHECK-NEXT: lfd f4, 40(r1)
-; CHECK-NEXT: addi r1, r1, 64
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- ret { ppc_fp128, ppc_fp128 } %result
-}
-
; FIXME: This could be made a tail call with the default expansion of llvm.sincos.
define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
; CHECK-LABEL: test_sincos_ppcf128_void_tail_call:
@@ -73,29 +49,6 @@ define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_s
ret void
}
-; FIXME: This could be made a tail call with the default expansion of llvm.sincospi.
-define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
-; CHECK-LABEL: test_sincospi_ppcf128_void_tail_call:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -32(r1)
-; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: addi r1, r1, 32
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0
- %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1
- store ppc_fp128 %result.0, ptr %out_sin, align 16
- store ppc_fp128 %result.1, ptr %out_cos, align 16
- ret void
-}
-
; NOTE: This would need a struct-return library call for llvm.sincos to become a tail call.
define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) {
; CHECK-LABEL: test_sincos_ppcf128_tail_call:
@@ -120,28 +73,3 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) {
%result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincos.ppcf128(ppc_fp128 %a)
ret { ppc_fp128, ppc_fp128 } %result
}
-
-; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call.
-define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) {
-; CHECK-LABEL: test_sincospi_ppcf128_tail_call:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: addi r5, r1, 48
-; CHECK-NEXT: addi r6, r1, 32
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: lfd f1, 48(r1)
-; CHECK-NEXT: lfd f2, 56(r1)
-; CHECK-NEXT: lfd f3, 32(r1)
-; CHECK-NEXT: lfd f4, 40(r1)
-; CHECK-NEXT: addi r1, r1, 64
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- ret { ppc_fp128, ppc_fp128 } %result
-}
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll
new file mode 100644
index 0000000..75e7559
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll
@@ -0,0 +1,21 @@
+; RUN: not llc -mtriple=powerpc64le-gnu-linux -filetype=null %s 2>&1 | FileCheck %s
+
+; CHECK: error: no libcall available for fsincospi
+define { half, half } @test_sincospi_f16(half %a) #0 {
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { float, float } @test_sincospi_f32(float %a) #0 {
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { double, double } @test_sincospi_f64(double %a) #0 {
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+attributes #0 = { nounwind }
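`llvm.sincospi.*` computes sin(πx) and cos(πx) in one call and returns both as a two-element struct, sin first (see the ppcf128 test below, which stores element 0 to %out_sin and element 1 to %out_cos); this triple has no `sincospi` libcall to lower it to, hence the errors checked above. A minimal sketch of consuming the result (the function name is hypothetical):

define float @sinpi_only(float %x) {
  %pair = call { float, float } @llvm.sincospi.f32(float %x)
  %sin = extractvalue { float, float } %pair, 0
  ret float %sin
}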
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll
new file mode 100644
index 0000000..bc656bb
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll
@@ -0,0 +1,25 @@
+; XFAIL: *
+; FIXME: asserts
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-gnu-linux -filetype=null \
+; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names %s
+
+define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) {
+ %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ ret { ppc_fp128, ppc_fp128 } %result
+}
+
+; FIXME: This could be made a tail call with the default expansion of llvm.sincospi.
+define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
+ %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0
+ %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1
+ store ppc_fp128 %result.0, ptr %out_sin, align 16
+ store ppc_fp128 %result.1, ptr %out_cos, align 16
+ ret void
+}
+
+; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call.
+define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) {
+ %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ ret { ppc_fp128, ppc_fp128 } %result
+}
diff --git a/llvm/test/CodeGen/PowerPC/milicode64.ll b/llvm/test/CodeGen/PowerPC/milicode64.ll
index f7814a4..2dbf414 100644
--- a/llvm/test/CodeGen/PowerPC/milicode64.ll
+++ b/llvm/test/CodeGen/PowerPC/milicode64.ll
@@ -156,7 +156,7 @@ define ptr @test_memmove(ptr noundef %destination, ptr noundef %source, i64 noun
; CHECK-AIX-64-P9-NEXT: std r3, 128(r1)
; CHECK-AIX-64-P9-NEXT: std r4, 120(r1)
; CHECK-AIX-64-P9-NEXT: std r5, 112(r1)
-; CHECK-AIX-64-P9-NEXT: bl .memmove[PR]
+; CHECK-AIX-64-P9-NEXT: bl .___memmove64[PR]
; CHECK-AIX-64-P9-NEXT: nop
; CHECK-AIX-64-P9-NEXT: mr r3, r31
; CHECK-AIX-64-P9-NEXT: ld r31, 136(r1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
index 260394b..fb550bb 100644
--- a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
+++ b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
@@ -7,9 +7,11 @@ entry:
; CHECK-SPIRV: OpDecorate %[[#PId:]] Volatile
; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoAlias
+; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoWrite
; CHECK-SPIRV: %[[#PId]] = OpFunctionParameter %[[#]]
!7 = !{!"volatile"}
!8 = !{i32 38, i32 4} ; FuncParamAttr NoAlias
-!9 = !{!8}
+!11 = !{i32 38, i32 6} ; FuncParamAttr NoWrite
+!9 = !{!8, !11}
!10 = !{!9}
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
index 79665af..9632469 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
@@ -7,22 +7,22 @@ define dso_local i32 @test_500_504(ptr nocapture readonly %x) {
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #126
-; CHECK-NEXT: adr r2, .LCPI0_0
-; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: mov.w r2, #500
-; CHECK-NEXT: vdup.32 q1, r2
-; CHECK-NEXT: movs r1, #0
+; CHECK-NEXT: adr r1, .LCPI0_0
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: mov.w r1, #500
+; CHECK-NEXT: mov.w r12, #0
+; CHECK-NEXT: vdup.32 q1, r1
; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: .LBB0_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vqadd.u32 q2, q0, r1
-; CHECK-NEXT: adds r1, #4
+; CHECK-NEXT: vqadd.u32 q2, q0, r2
+; CHECK-NEXT: adds r2, #4
; CHECK-NEXT: vptt.u32 hi, q1, q2
; CHECK-NEXT: vldrwt.u32 q2, [r0], #16
-; CHECK-NEXT: vaddvat.u32 r2, q2
+; CHECK-NEXT: vaddvat.u32 r12, q2
; CHECK-NEXT: le lr, .LBB0_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
-; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: pop {r7, pc}
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.3:
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
index ec257bc..bcedcd4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
@@ -28,29 +28,29 @@ define void @arm_min_q31(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocap
; CHECK-NEXT: str r6, [sp] @ 4-byte Spill
; CHECK-NEXT: subs r7, #4
; CHECK-NEXT: movs r6, #1
-; CHECK-NEXT: mov.w r8, #0
; CHECK-NEXT: mov.w r10, #0
+; CHECK-NEXT: mov.w r8, #0
; CHECK-NEXT: add.w lr, r6, r7, lsr #2
; CHECK-NEXT: .LBB0_5: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr r11, [r0, #16]!
-; CHECK-NEXT: ldrd r5, r7, [r0, #-12]
+; CHECK-NEXT: ldrd r5, r6, [r0, #-12]
; CHECK-NEXT: ldr r4, [r0, #-4]
; CHECK-NEXT: cmp r12, r5
; CHECK-NEXT: csel r5, r5, r12, gt
-; CHECK-NEXT: csinc r6, r10, r8, le
-; CHECK-NEXT: cmp r5, r7
+; CHECK-NEXT: csinc r7, r10, r8, le
+; CHECK-NEXT: cmp r5, r6
; CHECK-NEXT: it gt
-; CHECK-NEXT: addgt.w r6, r8, #2
-; CHECK-NEXT: csel r7, r7, r5, gt
-; CHECK-NEXT: cmp r7, r4
+; CHECK-NEXT: addgt.w r7, r8, #2
+; CHECK-NEXT: csel r6, r6, r5, gt
+; CHECK-NEXT: cmp r6, r4
; CHECK-NEXT: it gt
-; CHECK-NEXT: addgt.w r6, r8, #3
-; CHECK-NEXT: csel r7, r4, r7, gt
+; CHECK-NEXT: addgt.w r7, r8, #3
+; CHECK-NEXT: csel r6, r4, r6, gt
; CHECK-NEXT: add.w r8, r8, #4
-; CHECK-NEXT: cmp r7, r11
-; CHECK-NEXT: csel r10, r8, r6, gt
-; CHECK-NEXT: csel r12, r11, r7, gt
+; CHECK-NEXT: cmp r6, r11
+; CHECK-NEXT: csel r10, r8, r7, gt
+; CHECK-NEXT: csel r12, r11, r6, gt
; CHECK-NEXT: le lr, .LBB0_5
; CHECK-NEXT: @ %bb.6: @ %while.end.loopexit.unr-lcssa.loopexit
; CHECK-NEXT: ldr r6, [sp] @ 4-byte Reload
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
index 1769c5d..98e082b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
@@ -21,11 +21,12 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: it lt
; ENABLED-NEXT: bxlt lr
; ENABLED-NEXT: .LBB0_1: @ %for.body.lr.ph
-; ENABLED-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ENABLED-NEXT: mov r11, r0
-; ENABLED-NEXT: ldr r0, [sp, #32]
+; ENABLED-NEXT: ldr r0, [sp, #36]
; ENABLED-NEXT: add.w r9, r2, #3
; ENABLED-NEXT: mov.w r12, #0
+; ENABLED-NEXT: mov.w r8, #1
; ENABLED-NEXT: mov r10, r11
; ENABLED-NEXT: uxth r0, r0
; ENABLED-NEXT: rsbs r5, r0, #0
@@ -49,18 +50,16 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: @ %bb.5: @ %vector.ph
; ENABLED-NEXT: @ in Loop: Header=BB0_4 Depth=1
; ENABLED-NEXT: bic r0, r9, #3
-; ENABLED-NEXT: movs r7, #1
-; ENABLED-NEXT: subs r0, #4
; ENABLED-NEXT: sub.w r4, r2, r12
+; ENABLED-NEXT: subs r0, #4
; ENABLED-NEXT: vmov.i32 q1, #0x0
-; ENABLED-NEXT: add.w r6, r7, r0, lsr #2
+; ENABLED-NEXT: mov r7, r10
+; ENABLED-NEXT: add.w r6, r8, r0, lsr #2
; ENABLED-NEXT: adds r0, r2, #3
; ENABLED-NEXT: sub.w r0, r0, r12
; ENABLED-NEXT: bic r0, r0, #3
; ENABLED-NEXT: subs r0, #4
-; ENABLED-NEXT: add.w r0, r7, r0, lsr #2
-; ENABLED-NEXT: mov r7, r10
-; ENABLED-NEXT: dls lr, r0
+; ENABLED-NEXT: add.w lr, r8, r0, lsr #2
; ENABLED-NEXT: mov r0, r11
; ENABLED-NEXT: .LBB0_6: @ %vector.body
; ENABLED-NEXT: @ Parent Loop BB0_4 Depth=1
@@ -83,7 +82,7 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: vaddv.u32 r0, q0
; ENABLED-NEXT: b .LBB0_3
; ENABLED-NEXT: .LBB0_8:
-; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ENABLED-NEXT: bx lr
;
; NOREDUCTIONS-LABEL: varying_outer_2d_reduction:
@@ -92,11 +91,12 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: it lt
; NOREDUCTIONS-NEXT: bxlt lr
; NOREDUCTIONS-NEXT: .LBB0_1: @ %for.body.lr.ph
-; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; NOREDUCTIONS-NEXT: mov r11, r0
-; NOREDUCTIONS-NEXT: ldr r0, [sp, #32]
+; NOREDUCTIONS-NEXT: ldr r0, [sp, #36]
; NOREDUCTIONS-NEXT: add.w r9, r2, #3
; NOREDUCTIONS-NEXT: mov.w r12, #0
+; NOREDUCTIONS-NEXT: mov.w r8, #1
; NOREDUCTIONS-NEXT: mov r10, r11
; NOREDUCTIONS-NEXT: uxth r0, r0
; NOREDUCTIONS-NEXT: rsbs r5, r0, #0
@@ -120,18 +120,16 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: @ %bb.5: @ %vector.ph
; NOREDUCTIONS-NEXT: @ in Loop: Header=BB0_4 Depth=1
; NOREDUCTIONS-NEXT: bic r0, r9, #3
-; NOREDUCTIONS-NEXT: movs r7, #1
-; NOREDUCTIONS-NEXT: subs r0, #4
; NOREDUCTIONS-NEXT: sub.w r4, r2, r12
+; NOREDUCTIONS-NEXT: subs r0, #4
; NOREDUCTIONS-NEXT: vmov.i32 q1, #0x0
-; NOREDUCTIONS-NEXT: add.w r6, r7, r0, lsr #2
+; NOREDUCTIONS-NEXT: mov r7, r10
+; NOREDUCTIONS-NEXT: add.w r6, r8, r0, lsr #2
; NOREDUCTIONS-NEXT: adds r0, r2, #3
; NOREDUCTIONS-NEXT: sub.w r0, r0, r12
; NOREDUCTIONS-NEXT: bic r0, r0, #3
; NOREDUCTIONS-NEXT: subs r0, #4
-; NOREDUCTIONS-NEXT: add.w r0, r7, r0, lsr #2
-; NOREDUCTIONS-NEXT: mov r7, r10
-; NOREDUCTIONS-NEXT: dls lr, r0
+; NOREDUCTIONS-NEXT: add.w lr, r8, r0, lsr #2
; NOREDUCTIONS-NEXT: mov r0, r11
; NOREDUCTIONS-NEXT: .LBB0_6: @ %vector.body
; NOREDUCTIONS-NEXT: @ Parent Loop BB0_4 Depth=1
@@ -154,7 +152,7 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: vaddv.u32 r0, q0
; NOREDUCTIONS-NEXT: b .LBB0_3
; NOREDUCTIONS-NEXT: .LBB0_8:
-; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; NOREDUCTIONS-NEXT: bx lr
entry:
%conv = sext i16 %N to i32
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
index cbcbf1f..435acc2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
@@ -165,74 +165,73 @@ define dso_local i32 @b(ptr %c, i32 %d, i32 %e, ptr %n) "frame-pointer"="all" {
; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: wls lr, r1, .LBB2_3
; CHECK-NEXT: @ %bb.1: @ %while.body.preheader
-; CHECK-NEXT: adds r6, r3, #4
-; CHECK-NEXT: adds r1, r0, #4
+; CHECK-NEXT: add.w r9, r3, #4
+; CHECK-NEXT: add.w r10, r0, #4
; CHECK-NEXT: mvn r8, #1
-; CHECK-NEXT: @ implicit-def: $r9
+; CHECK-NEXT: @ implicit-def: $r6
; CHECK-NEXT: @ implicit-def: $r4
; CHECK-NEXT: str r2, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB2_2: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: ldr.w r1, [r10]
; CHECK-NEXT: asrs r2, r4, #31
-; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: ldr r1, [r1]
+; CHECK-NEXT: str r6, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: muls r1, r3, r1
; CHECK-NEXT: adds r4, r4, r1
; CHECK-NEXT: adc.w r1, r2, r1, asr #31
; CHECK-NEXT: adds.w r2, r4, #-2147483648
-; CHECK-NEXT: ldrd r2, r4, [r8]
-; CHECK-NEXT: adc r5, r1, #0
-; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: smull r4, r2, r4, r9
-; CHECK-NEXT: asrs r1, r5, #31
+; CHECK-NEXT: ldrd r5, r4, [r8]
+; CHECK-NEXT: adc r2, r1, #0
; CHECK-NEXT: str r5, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: subs r4, r5, r4
-; CHECK-NEXT: sbcs r1, r2
-; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds.w r10, r4, #-2147483648
-; CHECK-NEXT: adc r1, r1, #0
-; CHECK-NEXT: ldr r4, [r2, #-4]
+; CHECK-NEXT: smull r4, r5, r4, r6
+; CHECK-NEXT: asrs r1, r2, #31
+; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: subs r4, r2, r4
+; CHECK-NEXT: sbcs r1, r5
+; CHECK-NEXT: adds.w r6, r4, #-2147483648
+; CHECK-NEXT: ldr r4, [r10, #-4]
+; CHECK-NEXT: adc r11, r1, #0
+; CHECK-NEXT: mov r1, r9
+; CHECK-NEXT: add.w r10, r10, #4
; CHECK-NEXT: muls r4, r3, r4
; CHECK-NEXT: adds r3, #4
; CHECK-NEXT: adds.w r12, r4, #-2147483648
; CHECK-NEXT: asr.w r5, r4, #31
-; CHECK-NEXT: ldr r4, [r6]
+; CHECK-NEXT: ldr.w r4, [r9]
; CHECK-NEXT: adc r5, r5, #0
; CHECK-NEXT: mul r2, r4, r0
-; CHECK-NEXT: adds r0, #4
; CHECK-NEXT: add.w r2, r2, #-2147483648
; CHECK-NEXT: asrl r12, r5, r2
-; CHECK-NEXT: smull r2, r5, r4, r12
-; CHECK-NEXT: lsll r2, r5, #30
-; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: asr.w r11, r5, #31
-; CHECK-NEXT: mov r12, r5
-; CHECK-NEXT: lsll r12, r11, r4
-; CHECK-NEXT: mul r2, r2, r9
-; CHECK-NEXT: lsrl r12, r11, #2
-; CHECK-NEXT: adds r2, #2
-; CHECK-NEXT: lsll r12, r11, r2
+; CHECK-NEXT: smull r2, r9, r4, r12
+; CHECK-NEXT: mov r12, r0
+; CHECK-NEXT: lsll r2, r9, #30
+; CHECK-NEXT: asr.w r5, r9, #31
+; CHECK-NEXT: mov r2, r9
+; CHECK-NEXT: mov r9, r1
+; CHECK-NEXT: ldrd r1, r0, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT: lsll r2, r5, r4
+; CHECK-NEXT: lsrl r2, r5, #2
+; CHECK-NEXT: muls r0, r1, r0
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: adds r0, #2
+; CHECK-NEXT: lsll r2, r5, r0
+; CHECK-NEXT: add.w r0, r2, #-2147483648
; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload
-; CHECK-NEXT: add.w r5, r12, #-2147483648
-; CHECK-NEXT: asrl r10, r1, r5
-; CHECK-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: lsrl r10, r1, #2
-; CHECK-NEXT: movs r1, #2
-; CHECK-NEXT: mov r9, r10
-; CHECK-NEXT: str.w r10, [r1]
-; CHECK-NEXT: ldr r1, [r8], #-4
-; CHECK-NEXT: mls r5, r1, r4, r5
-; CHECK-NEXT: adds.w r4, r5, #-2147483648
-; CHECK-NEXT: asr.w r1, r5, #31
+; CHECK-NEXT: asrl r6, r11, r0
+; CHECK-NEXT: movs r0, #2
+; CHECK-NEXT: lsrl r6, r11, #2
+; CHECK-NEXT: str r6, [r0]
+; CHECK-NEXT: ldr r0, [r8], #-4
+; CHECK-NEXT: mls r0, r0, r4, r1
+; CHECK-NEXT: adds.w r4, r0, #-2147483648
+; CHECK-NEXT: asr.w r1, r0, #31
; CHECK-NEXT: adc r1, r1, #0
; CHECK-NEXT: lsrl r4, r1, #2
-; CHECK-NEXT: rsbs r1, r4, #0
-; CHECK-NEXT: str r1, [r2]
-; CHECK-NEXT: str r1, [r6, #-4]
-; CHECK-NEXT: adds r6, #4
-; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds r1, #4
+; CHECK-NEXT: rsbs r0, r4, #0
+; CHECK-NEXT: str r0, [r2]
+; CHECK-NEXT: str r0, [r9, #-4]
+; CHECK-NEXT: add.w r9, r9, #4
+; CHECK-NEXT: add.w r0, r12, #4
; CHECK-NEXT: le lr, .LBB2_2
; CHECK-NEXT: .LBB2_3: @ %while.end
; CHECK-NEXT: add sp, #16
diff --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index f7b4548..b6657d6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -1573,120 +1573,115 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(ptr nocapture readonly %
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: ldrd r7, r9, [r0]
-; CHECK-NEXT: and r6, r3, #3
-; CHECK-NEXT: ldr r0, [r0, #8]
-; CHECK-NEXT: lsrs r3, r3, #2
-; CHECK-NEXT: @ implicit-def: $r12
-; CHECK-NEXT: str r6, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: str r3, [sp] @ 4-byte Spill
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: ldm.w r0, {r7, r9, r11}
+; CHECK-NEXT: and r0, r3, #3
+; CHECK-NEXT: @ implicit-def: $r5
+; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: lsrs r0, r3, #2
+; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: b .LBB19_3
; CHECK-NEXT: .LBB19_1: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: mov r2, r5
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: mov r8, r10
+; CHECK-NEXT: mov r8, r3
+; CHECK-NEXT: mov r3, r12
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r12, r10
; CHECK-NEXT: .LBB19_2: @ %if.end69
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
; CHECK-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds r0, #128
-; CHECK-NEXT: strd r2, r4, [r9]
-; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: subs r7, #1
-; CHECK-NEXT: strd r3, r8, [r9, #8]
-; CHECK-NEXT: add.w r9, r9, #16
+; CHECK-NEXT: add.w r11, r11, #128
+; CHECK-NEXT: strd r8, r0, [r9]
; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: strd r3, r12, [r9, #8]
+; CHECK-NEXT: add.w r9, r9, #16
+; CHECK-NEXT: subs r7, #1
; CHECK-NEXT: beq.w .LBB19_13
; CHECK-NEXT: .LBB19_3: @ %do.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB19_5 Depth 2
-; CHECK-NEXT: ldrd r5, r11, [r9]
+; CHECK-NEXT: ldr.w r10, [r9, #12]
; CHECK-NEXT: mov r6, r2
-; CHECK-NEXT: ldrd r8, r10, [r9, #8]
-; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload
+; CHECK-NEXT: ldm.w r9, {r3, r4, r12}
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: str r7, [sp, #12] @ 4-byte Spill
-; CHECK-NEXT: wls lr, r2, .LBB19_6
+; CHECK-NEXT: wls lr, r0, .LBB19_6
; CHECK-NEXT: @ %bb.4: @ %while.body.lr.ph
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldr r6, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: mov r3, r5
+; CHECK-NEXT: mov r6, r2
; CHECK-NEXT: .LBB19_5: @ %while.body
; CHECK-NEXT: @ Parent Loop BB19_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldr r5, [r1, #12]
-; CHECK-NEXT: vldrw.u32 q2, [r0]
-; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT: ldm.w r1, {r2, r7, r11}
-; CHECK-NEXT: vmul.f32 q2, q2, r5
-; CHECK-NEXT: vldrw.u32 q7, [r0, #32]
-; CHECK-NEXT: vfma.f32 q2, q6, r11
-; CHECK-NEXT: vldrw.u32 q4, [r0, #48]
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r8, r4
+; CHECK-NEXT: ldrd r4, r3, [r1, #8]
+; CHECK-NEXT: vldrw.u32 q2, [r11]
+; CHECK-NEXT: vldrw.u32 q6, [r11, #16]
+; CHECK-NEXT: ldrd r0, r7, [r1]
+; CHECK-NEXT: vmul.f32 q2, q2, r3
+; CHECK-NEXT: vldrw.u32 q7, [r11, #32]
+; CHECK-NEXT: vfma.f32 q2, q6, r4
+; CHECK-NEXT: vldrw.u32 q4, [r11, #48]
; CHECK-NEXT: vfma.f32 q2, q7, r7
-; CHECK-NEXT: vldrw.u32 q5, [r0, #64]
-; CHECK-NEXT: vfma.f32 q2, q4, r2
-; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT: vfma.f32 q2, q5, r3
-; CHECK-NEXT: vldrw.u32 q1, [r0, #96]
-; CHECK-NEXT: vfma.f32 q2, q3, r4
-; CHECK-NEXT: vldrw.u32 q0, [r0, #112]
-; CHECK-NEXT: vfma.f32 q2, q1, r8
+; CHECK-NEXT: vldrw.u32 q5, [r11, #64]
+; CHECK-NEXT: vfma.f32 q2, q4, r0
+; CHECK-NEXT: vldrw.u32 q3, [r11, #80]
+; CHECK-NEXT: vfma.f32 q2, q5, r5
+; CHECK-NEXT: vldrw.u32 q1, [r11, #96]
+; CHECK-NEXT: vfma.f32 q2, q3, r8
+; CHECK-NEXT: vldrw.u32 q0, [r11, #112]
+; CHECK-NEXT: vfma.f32 q2, q1, r12
; CHECK-NEXT: adds r1, #16
; CHECK-NEXT: vfma.f32 q2, q0, r10
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: vmov r10, r8, d5
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: vmov r10, r12, d5
; CHECK-NEXT: vstrb.8 q2, [r6], #16
-; CHECK-NEXT: mov r3, r5
-; CHECK-NEXT: mov r12, r5
; CHECK-NEXT: le lr, .LBB19_5
; CHECK-NEXT: .LBB19_6: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: ldr r7, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: cmp r7, #0
; CHECK-NEXT: beq .LBB19_1
; CHECK-NEXT: @ %bb.7: @ %if.then
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldrd lr, r4, [r1]
-; CHECK-NEXT: vldrw.u32 q0, [r0]
-; CHECK-NEXT: ldrd r2, r1, [r1, #8]
-; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT: vldrw.u32 q7, [r0, #32]
-; CHECK-NEXT: vldrw.u32 q4, [r0, #48]
+; CHECK-NEXT: ldrd lr, r0, [r1]
+; CHECK-NEXT: vldrw.u32 q0, [r11]
+; CHECK-NEXT: ldrd r8, r1, [r1, #8]
+; CHECK-NEXT: vldrw.u32 q6, [r11, #16]
+; CHECK-NEXT: vldrw.u32 q7, [r11, #32]
+; CHECK-NEXT: vldrw.u32 q4, [r11, #48]
; CHECK-NEXT: vmul.f32 q0, q0, r1
-; CHECK-NEXT: vldrw.u32 q5, [r0, #64]
-; CHECK-NEXT: vfma.f32 q0, q6, r2
-; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT: vfma.f32 q0, q7, r4
-; CHECK-NEXT: vldrw.u32 q2, [r0, #96]
+; CHECK-NEXT: vldrw.u32 q5, [r11, #64]
+; CHECK-NEXT: vfma.f32 q0, q6, r8
+; CHECK-NEXT: vldrw.u32 q3, [r11, #80]
+; CHECK-NEXT: vfma.f32 q0, q7, r0
+; CHECK-NEXT: vldrw.u32 q2, [r11, #96]
; CHECK-NEXT: vfma.f32 q0, q4, lr
-; CHECK-NEXT: vldrw.u32 q1, [r0, #112]
-; CHECK-NEXT: vfma.f32 q0, q5, r5
-; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: vfma.f32 q0, q3, r11
-; CHECK-NEXT: vfma.f32 q0, q2, r8
+; CHECK-NEXT: vldrw.u32 q1, [r11, #112]
+; CHECK-NEXT: vfma.f32 q0, q5, r3
+; CHECK-NEXT: cmp r7, #1
+; CHECK-NEXT: vfma.f32 q0, q3, r4
+; CHECK-NEXT: vfma.f32 q0, q2, r12
; CHECK-NEXT: vfma.f32 q0, q1, r10
-; CHECK-NEXT: vmov r5, s0
+; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: bne .LBB19_9
; CHECK-NEXT: @ %bb.8: @ %if.then58
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: str r5, [r6]
-; CHECK-NEXT: mov r2, lr
-; CHECK-NEXT: mov r4, r12
-; CHECK-NEXT: mov r3, r5
+; CHECK-NEXT: str r4, [r6]
+; CHECK-NEXT: mov r8, lr
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r3, r4
; CHECK-NEXT: b .LBB19_12
; CHECK-NEXT: .LBB19_9: @ %if.else
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: vmov r8, s1
-; CHECK-NEXT: cmp r3, #2
+; CHECK-NEXT: vmov r12, s1
+; CHECK-NEXT: cmp r7, #2
; CHECK-NEXT: vstr s1, [r6, #4]
-; CHECK-NEXT: str r5, [r6]
+; CHECK-NEXT: str r4, [r6]
; CHECK-NEXT: bne .LBB19_11
; CHECK-NEXT: @ %bb.10: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r2, r4
-; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: mov r4, lr
-; CHECK-NEXT: mov r8, r5
+; CHECK-NEXT: mov r8, r0
+; CHECK-NEXT: mov r3, r12
+; CHECK-NEXT: mov r0, lr
+; CHECK-NEXT: mov r12, r4
; CHECK-NEXT: b .LBB19_12
; CHECK-NEXT: .LBB19_11: @ %if.else64
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
@@ -1694,7 +1689,7 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(ptr nocapture readonly %
; CHECK-NEXT: vstr s2, [r6, #8]
; CHECK-NEXT: .LBB19_12: @ %if.end69
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r12, r1
+; CHECK-NEXT: mov r5, r1
; CHECK-NEXT: b .LBB19_2
; CHECK-NEXT: .LBB19_13: @ %do.end
; CHECK-NEXT: add sp, #16
@@ -1901,8 +1896,8 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: ldrd r6, r12, [r0, #4]
; CHECK-NEXT: lsr.w r8, r3, #1
; CHECK-NEXT: ldrb r0, [r0]
@@ -1910,11 +1905,11 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: b .LBB20_3
; CHECK-NEXT: .LBB20_1: @ %if.else
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vmov.f32 s14, s13
-; CHECK-NEXT: vstr s12, [r6]
+; CHECK-NEXT: vmov.f32 s6, s5
+; CHECK-NEXT: vstr s4, [r6]
; CHECK-NEXT: .LBB20_2: @ %if.end
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vstr s14, [r6, #4]
+; CHECK-NEXT: vstr s6, [r6, #4]
; CHECK-NEXT: add.w r12, r12, #20
; CHECK-NEXT: adds r6, #8
; CHECK-NEXT: subs r0, #1
@@ -1923,41 +1918,39 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: .LBB20_3: @ %do.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB20_5 Depth 2
-; CHECK-NEXT: vldrw.u32 q2, [r12]
+; CHECK-NEXT: vldrw.u32 q3, [r12]
; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: vshlc q4, r5, #32
-; CHECK-NEXT: vldrw.u32 q1, [r12, #8]
-; CHECK-NEXT: vmov q5, q1
+; CHECK-NEXT: vldrw.u32 q2, [r12, #8]
+; CHECK-NEXT: vmov q5, q2
; CHECK-NEXT: vshlc q5, r5, #32
-; CHECK-NEXT: vldrw.u32 q3, [r6]
-; CHECK-NEXT: vmov.f32 s14, s0
+; CHECK-NEXT: vldrw.u32 q1, [r6]
+; CHECK-NEXT: vmov.f32 s6, s0
; CHECK-NEXT: mov r5, r2
-; CHECK-NEXT: vmov.f32 s15, s0
+; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: wls lr, r8, .LBB20_6
; CHECK-NEXT: @ %bb.4: @ %while.body.preheader
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vmov q6, q3
; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: .LBB20_5: @ %while.body
; CHECK-NEXT: @ Parent Loop BB20_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldrd r7, r4, [r1], #8
-; CHECK-NEXT: vfma.f32 q6, q2, r7
-; CHECK-NEXT: vmov r7, s24
-; CHECK-NEXT: vmov q3, q6
-; CHECK-NEXT: vfma.f32 q3, q1, r7
-; CHECK-NEXT: vstr s24, [r5]
-; CHECK-NEXT: vmov.f32 s15, s0
-; CHECK-NEXT: vfma.f32 q3, q4, r4
-; CHECK-NEXT: vmov r4, s13
-; CHECK-NEXT: vstr s13, [r5, #4]
-; CHECK-NEXT: vfma.f32 q3, q5, r4
+; CHECK-NEXT: vfma.f32 q1, q3, r7
+; CHECK-NEXT: vmov r7, s4
+; CHECK-NEXT: vmov.f32 s2, s4
+; CHECK-NEXT: vfma.f32 q1, q2, r7
+; CHECK-NEXT: vmov.f32 s7, s0
+; CHECK-NEXT: vfma.f32 q1, q4, r4
+; CHECK-NEXT: vmov r4, s5
+; CHECK-NEXT: vstr s5, [r5, #4]
+; CHECK-NEXT: vfma.f32 q1, q5, r4
+; CHECK-NEXT: vmov.f32 s4, s6
+; CHECK-NEXT: vmov.f32 s5, s7
+; CHECK-NEXT: vmov.f32 s6, s0
+; CHECK-NEXT: vstr s2, [r5]
; CHECK-NEXT: adds r5, #8
-; CHECK-NEXT: vmov.f32 s12, s14
-; CHECK-NEXT: vmov.f32 s13, s15
-; CHECK-NEXT: vmov.f32 s14, s0
-; CHECK-NEXT: vmov q6, q3
; CHECK-NEXT: le lr, .LBB20_5
; CHECK-NEXT: .LBB20_6: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
@@ -1966,14 +1959,14 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: @ %bb.7: @ %if.then
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
; CHECK-NEXT: ldr r1, [r1]
-; CHECK-NEXT: vfma.f32 q3, q2, r1
-; CHECK-NEXT: vmov r1, s12
-; CHECK-NEXT: vstr s12, [r5]
-; CHECK-NEXT: vfma.f32 q3, q1, r1
-; CHECK-NEXT: vstr s13, [r6]
+; CHECK-NEXT: vfma.f32 q1, q3, r1
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vstr s4, [r5]
+; CHECK-NEXT: vfma.f32 q1, q2, r1
+; CHECK-NEXT: vstr s5, [r6]
; CHECK-NEXT: b .LBB20_2
; CHECK-NEXT: .LBB20_8: @ %do.end
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.9:
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 0d86f22..b60ee7c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -1313,27 +1313,29 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: @ Child Loop BB16_3 Depth 2
; CHECK-NEXT: ldr.w r8, [sp, #56] @ 4-byte Reload
; CHECK-NEXT: vldrw.u32 q5, [sp] @ 16-byte Reload
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: .LBB16_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB16_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: vadd.i32 q1, q5, r0
+; CHECK-NEXT: vmov q0, q6
+; CHECK-NEXT: vadd.i32 q6, q5, r0
+; CHECK-NEXT: vmov r7, r3, d13
; CHECK-NEXT: vadd.i32 q2, q4, r0
-; CHECK-NEXT: vmov r7, r3, d3
-; CHECK-NEXT: vadd.i32 q6, q0, lr
; CHECK-NEXT: vmov r5, r6, d5
+; CHECK-NEXT: vmov q1, q7
+; CHECK-NEXT: vmov r4, r10, d12
+; CHECK-NEXT: vadd.i32 q6, q0, lr
; CHECK-NEXT: subs.w r9, r9, #16
-; CHECK-NEXT: vmov r4, r10, d2
-; CHECK-NEXT: vadd.i32 q1, q7, lr
; CHECK-NEXT: vadd.i32 q4, q4, lr
; CHECK-NEXT: vadd.i32 q5, q5, lr
+; CHECK-NEXT: vadd.i32 q7, q7, lr
; CHECK-NEXT: ldrb.w r11, [r3]
; CHECK-NEXT: ldrb r3, [r7]
; CHECK-NEXT: vmov r7, r12, d4
-; CHECK-NEXT: vadd.i32 q2, q7, r0
-; CHECK-NEXT: vadd.i32 q7, q0, r0
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vadd.i32 q1, q0, r0
; CHECK-NEXT: ldrb r5, [r5]
; CHECK-NEXT: ldrb r6, [r6]
; CHECK-NEXT: ldrb r4, [r4]
@@ -1342,7 +1344,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: ldrb.w r1, [r12]
; CHECK-NEXT: vmov.8 q0[0], r7
; CHECK-NEXT: vmov.8 q0[1], r1
-; CHECK-NEXT: vmov r1, r7, d15
+; CHECK-NEXT: vmov r1, r7, d3
; CHECK-NEXT: vmov.8 q0[2], r5
; CHECK-NEXT: vmov.8 q0[3], r6
; CHECK-NEXT: vmov.8 q0[4], r4
@@ -1357,8 +1359,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: ldrb r3, [r5]
; CHECK-NEXT: ldrb.w r12, [r7]
; CHECK-NEXT: ldrb r5, [r4]
-; CHECK-NEXT: vmov r4, r7, d14
-; CHECK-NEXT: vmov q7, q1
+; CHECK-NEXT: vmov r4, r7, d2
; CHECK-NEXT: ldrb r4, [r4]
; CHECK-NEXT: ldrb r7, [r7]
; CHECK-NEXT: vmov.8 q0[8], r4
@@ -1370,7 +1371,6 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: vmov.8 q0[14], r3
; CHECK-NEXT: vmov.8 q0[15], r12
; CHECK-NEXT: vstrb.8 q0, [r8], #16
-; CHECK-NEXT: vmov q0, q6
; CHECK-NEXT: bne .LBB16_3
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB16_2 Depth=1
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index eedca2c..c0b2da7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -236,11 +236,11 @@ define arm_aapcs_vfpcc void @push_out_mul_gather_scatter(ptr noalias nocapture r
; CHECK-NEXT: vldrw.u32 q1, [r1]
; CHECK-NEXT: .LBB5_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q2, [r0, q1, uxtw #2]
-; CHECK-NEXT: vadd.i32 q3, q1, q0
+; CHECK-NEXT: vldrw.u32 q3, [r0, q1, uxtw #2]
; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vstrw.32 q2, [r0, q1, uxtw #2]
-; CHECK-NEXT: vmov q1, q3
+; CHECK-NEXT: vmov q2, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q0
+; CHECK-NEXT: vstrw.32 q3, [r0, q2, uxtw #2]
; CHECK-NEXT: bne .LBB5_1
; CHECK-NEXT: @ %bb.2: @ %end
; CHECK-NEXT: bx lr
@@ -330,20 +330,20 @@ define arm_aapcs_vfpcc void @non_gatscat_use1(ptr noalias nocapture readonly %da
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: adr r4, .LCPI7_0
; CHECK-NEXT: mov.w r12, #9
-; CHECK-NEXT: vldrw.u32 q1, [r4]
+; CHECK-NEXT: vldrw.u32 q0, [r4]
; CHECK-NEXT: mov.w lr, #12
; CHECK-NEXT: movs r4, #8
-; CHECK-NEXT: vdup.32 q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
; CHECK-NEXT: .LBB7_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vadd.i32 q2, q1, r4
-; CHECK-NEXT: vmla.i32 q3, q1, lr
-; CHECK-NEXT: vmul.i32 q1, q1, r12
-; CHECK-NEXT: vldrw.u32 q4, [q3, #24]
+; CHECK-NEXT: vmov q2, q0
+; CHECK-NEXT: vmov q3, q1
+; CHECK-NEXT: vmla.i32 q3, q2, lr
; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vstrw.32 q1, [r3]
-; CHECK-NEXT: vmov q1, q2
+; CHECK-NEXT: vldrw.u32 q4, [q3, #24]
+; CHECK-NEXT: vmul.i32 q2, q2, r12
+; CHECK-NEXT: vadd.i32 q0, q0, r4
+; CHECK-NEXT: vstrw.32 q2, [r3]
; CHECK-NEXT: vstrb.8 q4, [r1], #16
; CHECK-NEXT: bne .LBB7_1
; CHECK-NEXT: @ %bb.2: @ %end
@@ -390,22 +390,22 @@ define arm_aapcs_vfpcc void @non_gatscat_use2(ptr noalias nocapture readonly %da
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: adr r4, .LCPI8_0
; CHECK-NEXT: movs r5, #18
-; CHECK-NEXT: vldrw.u32 q2, [r4]
+; CHECK-NEXT: vldrw.u32 q0, [r4]
; CHECK-NEXT: mov.w r12, #9
; CHECK-NEXT: mov.w lr, #12
; CHECK-NEXT: movs r4, #8
-; CHECK-NEXT: vdup.32 q0, r0
-; CHECK-NEXT: vdup.32 q1, r5
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vdup.32 q2, r5
; CHECK-NEXT: .LBB8_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q4, q0
-; CHECK-NEXT: vadd.i32 q3, q2, r4
-; CHECK-NEXT: vmla.i32 q4, q2, lr
+; CHECK-NEXT: vmov q3, q0
+; CHECK-NEXT: vmov q4, q1
+; CHECK-NEXT: vmla.i32 q4, q3, lr
; CHECK-NEXT: subs r2, #4
; CHECK-NEXT: vldrw.u32 q5, [q4, #24]
-; CHECK-NEXT: vmov q4, q1
-; CHECK-NEXT: vmla.i32 q4, q2, r12
-; CHECK-NEXT: vmov q2, q3
+; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vmla.i32 q4, q3, r12
+; CHECK-NEXT: vadd.i32 q0, q0, r4
; CHECK-NEXT: vstrb.8 q5, [r1], #16
; CHECK-NEXT: vstrw.32 q4, [r3]
; CHECK-NEXT: bne .LBB8_1
@@ -487,21 +487,21 @@ define dso_local void @arm_mat_mult_q31(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: @ => This Loop Header: Depth=2
; CHECK-NEXT: @ Child Loop BB9_3 Depth 3
; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vmov q7, q2
+; CHECK-NEXT: vmov q1, q2
; CHECK-NEXT: dls lr, r10
; CHECK-NEXT: vmov.i32 q5, #0x0
-; CHECK-NEXT: vmlas.i32 q7, q0, r7
-; CHECK-NEXT: vmov q6, q4
+; CHECK-NEXT: vmlas.i32 q1, q0, r7
+; CHECK-NEXT: vmov q7, q4
; CHECK-NEXT: .LBB9_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB9_1 Depth=1
; CHECK-NEXT: @ Parent Loop BB9_2 Depth=2
; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vadd.i32 q0, q7, q3
-; CHECK-NEXT: vldrw.u32 q1, [r1, q7, uxtw #2]
-; CHECK-NEXT: vldrw.u32 q7, [q6, #32]!
-; CHECK-NEXT: vmul.i32 q1, q1, q7
-; CHECK-NEXT: vmov q7, q0
-; CHECK-NEXT: vadd.i32 q5, q1, q5
+; CHECK-NEXT: vmov q6, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vldrw.u32 q0, [r1, q6, uxtw #2]
+; CHECK-NEXT: vldrw.u32 q6, [q7, #32]!
+; CHECK-NEXT: vmul.i32 q0, q0, q6
+; CHECK-NEXT: vadd.i32 q5, q0, q5
; CHECK-NEXT: le lr, .LBB9_3
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB9_2 Depth=2
@@ -702,12 +702,12 @@ define dso_local void @arm_mat_mult_q15(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: @ Parent Loop BB10_5 Depth=1
; CHECK-NEXT: @ Parent Loop BB10_8 Depth=2
; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vadd.i32 q6, q5, q3
-; CHECK-NEXT: vldrh.s32 q7, [r1, q5, uxtw #1]
-; CHECK-NEXT: vldrh.s32 q5, [r3], #8
-; CHECK-NEXT: vmul.i32 q5, q7, q5
-; CHECK-NEXT: vadd.i32 q4, q5, q4
-; CHECK-NEXT: vmov q5, q6
+; CHECK-NEXT: vmov q6, q5
+; CHECK-NEXT: vadd.i32 q5, q5, q3
+; CHECK-NEXT: vldrh.s32 q7, [r1, q6, uxtw #1]
+; CHECK-NEXT: vldrh.s32 q6, [r3], #8
+; CHECK-NEXT: vmul.i32 q6, q7, q6
+; CHECK-NEXT: vadd.i32 q4, q6, q4
; CHECK-NEXT: le lr, .LBB10_11
; CHECK-NEXT: @ %bb.12: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB10_8 Depth=2
@@ -922,15 +922,15 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(ptr nocapture readonly
; CHECK-NEXT: @ Parent Loop BB11_3 Depth=3
; CHECK-NEXT: @ Parent Loop BB11_4 Depth=4
; CHECK-NEXT: @ => This Inner Loop Header: Depth=5
-; CHECK-NEXT: vldrb.s32 q2, [r0, q5]
-; CHECK-NEXT: vadd.i32 q7, q5, q0
-; CHECK-NEXT: vldrb.s32 q5, [r1, q4]
-; CHECK-NEXT: vadd.i32 q6, q4, q0
-; CHECK-NEXT: vadd.i32 q2, q2, r2
+; CHECK-NEXT: vmov q7, q5
+; CHECK-NEXT: vmov q6, q4
+; CHECK-NEXT: vldrb.s32 q2, [r0, q7]
+; CHECK-NEXT: vldrb.s32 q7, [r1, q6]
; CHECK-NEXT: subs r5, #4
-; CHECK-NEXT: vmlava.u32 r12, q2, q5
-; CHECK-NEXT: vmov q5, q7
-; CHECK-NEXT: vmov q4, q6
+; CHECK-NEXT: vadd.i32 q4, q4, q0
+; CHECK-NEXT: vadd.i32 q2, q2, r2
+; CHECK-NEXT: vadd.i32 q5, q5, q0
+; CHECK-NEXT: vmlava.u32 r12, q2, q7
; CHECK-NEXT: bne .LBB11_5
; CHECK-NEXT: @ %bb.6: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB11_4 Depth=4
diff --git a/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll b/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
index 43ed5ee..d6c5cde 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
@@ -18,50 +18,50 @@ define void @arm_cmplx_dot_prod_q15(ptr noundef %pSrcA, ptr noundef %pSrcB, i32
; CHECK-NEXT: csel r7, r6, r5, hs
; CHECK-NEXT: add.w lr, r7, #1
; CHECK-NEXT: mov r4, r5
-; CHECK-NEXT: vldrh.u16 q0, [r0], #32
+; CHECK-NEXT: vldrh.u16 q1, [r0], #32
; CHECK-NEXT: movs r7, #0
; CHECK-NEXT: mov r8, r5
+; CHECK-NEXT: vldrh.u16 q2, [r1], #32
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q2
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q2
+; CHECK-NEXT: vldrh.u16 q2, [r1, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q2
; CHECK-NEXT: vldrh.u16 q1, [r1], #32
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q1
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q1
-; CHECK-NEXT: vldrh.u16 q3, [r1, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q3
-; CHECK-NEXT: vldrh.u16 q0, [r1], #32
; CHECK-NEXT: sub.w lr, lr, #1
; CHECK-NEXT: cmp.w lr, #0
-; CHECK-NEXT: vldrh.u16 q1, [r0], #32
+; CHECK-NEXT: vldrh.u16 q3, [r0], #32
; CHECK-NEXT: beq .LBB0_3
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: .LBB0_2: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q3
-; CHECK-NEXT: vldrh.u16 q3, [r1, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q0
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q0
-; CHECK-NEXT: vldrh.u16 q1, [r0], #32
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q3
-; CHECK-NEXT: vldrh.u16 q0, [r1], #32
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q2
+; CHECK-NEXT: vldrh.u16 q2, [r1, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q3, q1
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q3, q1
+; CHECK-NEXT: vldrh.u16 q3, [r0], #32
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q2
+; CHECK-NEXT: vldrh.u16 q1, [r1], #32
; CHECK-NEXT: le lr, .LBB0_2
; CHECK-NEXT: .LBB0_3:
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q3
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q2
; CHECK-NEXT: movs r6, #14
; CHECK-NEXT: and.w r2, r6, r2, lsl #1
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q0
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q0
-; CHECK-NEXT: vldrh.u16 q0, [r1, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q0
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q3, q1
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q3, q1
+; CHECK-NEXT: vldrh.u16 q1, [r1, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q1
; CHECK-NEXT: vctp.16 r2
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q0
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q1
; CHECK-NEXT: vpst
-; CHECK-NEXT: vldrht.u16 q1, [r0]
+; CHECK-NEXT: vldrht.u16 q2, [r0]
; CHECK-NEXT: cmp r2, #9
; CHECK-NEXT: vpsttt
; CHECK-NEXT: vldrht.u16 q0, [r1]
-; CHECK-NEXT: vmlsldavat.s16 r4, r7, q1, q0
-; CHECK-NEXT: vmlaldavaxt.s16 r8, r5, q1, q0
+; CHECK-NEXT: vmlsldavat.s16 r4, r7, q2, q0
+; CHECK-NEXT: vmlaldavaxt.s16 r8, r5, q2, q0
; CHECK-NEXT: blo .LBB0_10
; CHECK-NEXT: @ %bb.4: @ %do.body.1
; CHECK-NEXT: subs r2, #8
diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 94d5490..6f2a0b2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -439,17 +439,18 @@ define arm_aapcs_vfpcc <8 x i16> @shuffle4step_i16(<32 x i16> %src) {
; CHECK-NEXT: vmovx.f16 s1, s14
; CHECK-NEXT: vmovx.f16 s20, s0
; CHECK-NEXT: vins.f16 s23, s1
-; CHECK-NEXT: vmovx.f16 s1, s2
-; CHECK-NEXT: vins.f16 s20, s1
+; CHECK-NEXT: vmov.f32 s1, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
; CHECK-NEXT: vmovx.f16 s21, s4
-; CHECK-NEXT: vmovx.f16 s1, s6
+; CHECK-NEXT: vins.f16 s20, s2
+; CHECK-NEXT: vmovx.f16 s2, s6
; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vins.f16 s8, s10
; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vins.f16 s21, s1
-; CHECK-NEXT: vins.f16 s0, s2
-; CHECK-NEXT: vmov.f32 s1, s4
+; CHECK-NEXT: vins.f16 s21, s2
+; CHECK-NEXT: vins.f16 s0, s1
; CHECK-NEXT: vmov.f32 s2, s8
+; CHECK-NEXT: vmov.f32 s1, s4
; CHECK-NEXT: vmov.f32 s3, s12
; CHECK-NEXT: vadd.i16 q0, q0, q5
; CHECK-NEXT: vadd.i16 q0, q0, q4
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld4.ll b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
index ab41069..ecb1698 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
@@ -391,17 +391,18 @@ define void @vld4_v8i16_align1(ptr %src, ptr %dst) {
; CHECK-NEXT: vmovx.f16 s1, s2
; CHECK-NEXT: vmovx.f16 s20, s8
; CHECK-NEXT: vins.f16 s23, s1
-; CHECK-NEXT: vmovx.f16 s1, s10
-; CHECK-NEXT: vins.f16 s20, s1
+; CHECK-NEXT: vmov.f32 s1, s10
+; CHECK-NEXT: vmovx.f16 s10, s10
; CHECK-NEXT: vmovx.f16 s21, s12
-; CHECK-NEXT: vmovx.f16 s1, s14
+; CHECK-NEXT: vins.f16 s20, s10
+; CHECK-NEXT: vmovx.f16 s10, s14
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vins.f16 s8, s10
-; CHECK-NEXT: vins.f16 s21, s1
-; CHECK-NEXT: vmov.f32 s9, s12
+; CHECK-NEXT: vins.f16 s21, s10
; CHECK-NEXT: vmov.f32 s10, s4
+; CHECK-NEXT: vins.f16 s8, s1
+; CHECK-NEXT: vmov.f32 s9, s12
; CHECK-NEXT: vmov.f32 s11, s0
; CHECK-NEXT: vadd.i16 q0, q2, q5
; CHECK-NEXT: vadd.i16 q0, q0, q4
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
index 04be18e..6656d44 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
@@ -344,14 +344,14 @@ define void @loop_absmax32_pred_c(ptr %0, i32 %1, ptr nocapture %2) {
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: dlstp.32 lr, r1
; CHECK-NEXT: .LBB19_1: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q1, [r0], #16
-; CHECK-NEXT: vmaxnma.f32 q1, q0
-; CHECK-NEXT: vmov q0, q1
+; CHECK-NEXT: vmov q1, q0
+; CHECK-NEXT: vldrw.u32 q0, [r0], #16
+; CHECK-NEXT: vmaxnma.f32 q0, q1
; CHECK-NEXT: letp lr, .LBB19_1
; CHECK-NEXT: @ %bb.2:
-; CHECK-NEXT: vldr s0, .LCPI19_0
-; CHECK-NEXT: vmov r0, s0
-; CHECK-NEXT: vmaxnmav.f32 r0, q1
+; CHECK-NEXT: vldr s4, .LCPI19_0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmaxnmav.f32 r0, q0
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vstr s0, [r2]
; CHECK-NEXT: pop {r7, pc}
@@ -538,14 +538,14 @@ define void @loop_absmax16_pred_c(ptr %0, i32 %1, ptr nocapture %2) {
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: dlstp.16 lr, r1
; CHECK-NEXT: .LBB23_1: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrh.u16 q1, [r0], #8
-; CHECK-NEXT: vmaxnma.f16 q1, q0
-; CHECK-NEXT: vmov q0, q1
+; CHECK-NEXT: vmov q1, q0
+; CHECK-NEXT: vldrh.u16 q0, [r0], #8
+; CHECK-NEXT: vmaxnma.f16 q0, q1
; CHECK-NEXT: letp lr, .LBB23_1
; CHECK-NEXT: @ %bb.2:
-; CHECK-NEXT: vldr.16 s0, .LCPI23_0
-; CHECK-NEXT: vmov r0, s0
-; CHECK-NEXT: vmaxnmav.f16 r0, q1
+; CHECK-NEXT: vldr.16 s4, .LCPI23_0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmaxnmav.f16 r0, q0
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vstr.16 s0, [r2]
; CHECK-NEXT: pop {r7, pc}
diff --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
index 26ab555..fb5f543 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
@@ -1055,18 +1055,18 @@ define void @vst4_v4f16(ptr %src, ptr %dst) {
; CHECK-NEXT: vins.f16 s12, s2
; CHECK-NEXT: vmovx.f16 s2, s3
; CHECK-NEXT: vins.f16 s11, s2
-; CHECK-NEXT: vmovx.f16 s2, s4
-; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vmovx.f16 s6, s6
+; CHECK-NEXT: vmov.f32 s2, s6
+; CHECK-NEXT: vmovx.f16 s6, s4
+; CHECK-NEXT: vins.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
; CHECK-NEXT: vins.f16 s1, s3
-; CHECK-NEXT: vins.f16 s2, s6
-; CHECK-NEXT: vmovx.f16 s6, s7
+; CHECK-NEXT: vins.f16 s6, s2
+; CHECK-NEXT: vmovx.f16 s2, s7
; CHECK-NEXT: vmov.f32 s8, s5
-; CHECK-NEXT: vins.f16 s10, s6
+; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vmov.f32 s9, s1
; CHECK-NEXT: vmov.f32 s5, s0
; CHECK-NEXT: vstrh.16 q2, [r1, #16]
-; CHECK-NEXT: vmov.f32 s6, s2
; CHECK-NEXT: vmov.f32 s7, s12
; CHECK-NEXT: vstrh.16 q1, [r1]
; CHECK-NEXT: pop {r4, r5, r6, pc}
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
index e6fcf56..2929a04 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
@@ -63,8 +63,8 @@ define hidden i32 @f(i32 %n) local_unnamed_addr #0 {
; CHECK-NEXT: subs r0, #4
; CHECK-NEXT: sub.w r3, r4, #16
; CHECK-NEXT: add.w lr, r2, r0, lsr #2
-; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: movs r0, #0
+; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: .LBB0_5: @ %for.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr r5, [r3, #16]!
diff --git a/llvm/test/CodeGen/X86/3addr-16bit.ll b/llvm/test/CodeGen/X86/3addr-16bit.ll
index c9390d9..2b692bf 100644
--- a/llvm/test/CodeGen/X86/3addr-16bit.ll
+++ b/llvm/test/CodeGen/X86/3addr-16bit.ll
@@ -10,27 +10,27 @@ define zeroext i16 @test1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test1:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: incl %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: incl %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB0_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB0_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test1:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: incl %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB0_2
@@ -63,27 +63,27 @@ define zeroext i16 @test2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test2:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: decl %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: decl %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB1_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB1_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test2:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: decl %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB1_2
@@ -118,27 +118,27 @@ define zeroext i16 @test3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test3:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: addl $2, %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: addl $2, %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB2_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB2_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test3:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: addl $2, %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB2_2
@@ -171,19 +171,19 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test4:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: addl %edi, %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: addl %edi, %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB3_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB3_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test4:
@@ -191,8 +191,8 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: cmpw %cx, %dx
; X86-NEXT: jne LBB3_2
diff --git a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
index b4d40fe..71887e3 100644
--- a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
@@ -2156,15 +2156,17 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
; X64-LABEL: atomic_shl1_mask01_xor_16_gpr_brz:
; X64: # %bb.0: # %entry
; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %ecx, %edx
; X64-NEXT: andb $15, %cl
-; X64-NEXT: movl $1, %edx
-; X64-NEXT: shll %cl, %edx
+; X64-NEXT: movl $1, %esi
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: .p2align 4
; X64-NEXT: .LBB34_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: xorl %edx, %ecx
+; X64-NEXT: xorl %esi, %ecx
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: lock cmpxchgw %cx, (%rdi)
; X64-NEXT: # kill: def $ax killed $ax def $eax
@@ -2172,12 +2174,12 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
; X64-NEXT: # %bb.2: # %atomicrmw.end
; X64-NEXT: movzwl %ax, %ecx
; X64-NEXT: movw $123, %ax
-; X64-NEXT: testl %ecx, %edx
+; X64-NEXT: testl %ecx, %esi
; X64-NEXT: je .LBB34_3
; X64-NEXT: # %bb.4: # %return
; X64-NEXT: retq
; X64-NEXT: .LBB34_3: # %if.then
-; X64-NEXT: movzwl %si, %eax
+; X64-NEXT: movzwl %dx, %eax
; X64-NEXT: movzwl (%rdi,%rax,2), %eax
; X64-NEXT: retq
entry:
@@ -3398,10 +3400,12 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
; X64-LABEL: atomic_shl1_mask01_and_16_gpr_brnz:
; X64: # %bb.0: # %entry
; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %ecx, %edx
; X64-NEXT: andb $15, %cl
-; X64-NEXT: movl $1, %edx
-; X64-NEXT: shll %cl, %edx
+; X64-NEXT: movl $1, %esi
+; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl $-2, %r8d
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %r8d
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: .p2align 4
@@ -3415,10 +3419,10 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
; X64-NEXT: jne .LBB52_1
; X64-NEXT: # %bb.2: # %atomicrmw.end
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: testl %eax, %edx
+; X64-NEXT: testl %eax, %esi
; X64-NEXT: je .LBB52_3
; X64-NEXT: # %bb.4: # %if.then
-; X64-NEXT: movzwl %si, %eax
+; X64-NEXT: movzwl %dx, %eax
; X64-NEXT: movzwl (%rdi,%rax,2), %eax
; X64-NEXT: retq
; X64-NEXT: .LBB52_3:
diff --git a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
index 105ee7f..e118f5d 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
@@ -46,8 +46,9 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_align4(ptr addrspace(1) %ptr, <2 x
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: lock cmpxchgl %ecx, (%rbx)
; CHECK-NEXT: setne %cl
-; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: pinsrw $0, %edx, %xmm0
; CHECK-NEXT: pinsrw $0, %eax, %xmm1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne .LBB0_1
diff --git a/llvm/test/CodeGen/X86/basic-block-sections-list.ll b/llvm/test/CodeGen/X86/basic-block-sections-list.ll
index 45ef452..d652a540 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-list.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-list.ll
@@ -1,17 +1,13 @@
-;; Check the basic block sections list option.
-;; version 0 profile:
-; RUN: echo '!_Z3foob' > %t1
+;; Check that specifying the function in the basic block sections profile
+;; without any other directives is a no-op.
;;
-;; version 1 profile:
-; RUN: echo 'v1' > %t2
-; RUN: echo 'f _Z3foob' >> %t2
+;; Specify the basic block sections profile:
+; RUN: echo 'v1' > %t
+; RUN: echo 'f _Z3foob' >> %t
;;
-; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t1 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-NO-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t1 -unique-basic-block-section-names --bbsections-guided-section-prefix=false | FileCheck %s -check-prefix=LINUX-SECTIONS-NO-GUIDED-PREFIX
-; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t2 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t2 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-NO-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t2 -unique-basic-block-section-names --bbsections-guided-section-prefix=false | FileCheck %s -check-prefix=LINUX-SECTIONS-NO-GUIDED-PREFIX
+; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t > %bbsections
+; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections > %orig
+; RUN: diff -u %orig %bbsections
define i32 @_Z3foob(i1 zeroext %0) nounwind {
%2 = alloca i32, align 4
@@ -41,45 +37,3 @@ define i32 @_Z3foob(i1 zeroext %0) nounwind {
declare i32 @_Z3barv() #1
declare i32 @_Z3bazv() #1
-
-define i32 @_Z3zipb(i1 zeroext %0) nounwind {
- %2 = alloca i32, align 4
- %3 = alloca i8, align 1
- %4 = zext i1 %0 to i8
- store i8 %4, ptr %3, align 1
- %5 = load i8, ptr %3, align 1
- %6 = trunc i8 %5 to i1
- %7 = zext i1 %6 to i32
- %8 = icmp sgt i32 %7, 0
- br i1 %8, label %9, label %11
-
-9: ; preds = %1
- %10 = call i32 @_Z3barv()
- store i32 %10, ptr %2, align 4
- br label %13
-
-11: ; preds = %1
- %12 = call i32 @_Z3bazv()
- store i32 %12, ptr %2, align 4
- br label %13
-
-13: ; preds = %11, %9
- %14 = load i32, ptr %2, align 4
- ret i32 %14
-}
-
-; LINUX-SECTIONS-NO-GUIDED-PREFIX: .section .text._Z3foob,"ax",@progbits
-; LINUX-SECTIONS: .section .text.hot._Z3foob,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.1,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.1:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.2,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.2:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.3,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.3:
-
-; LINUX-SECTIONS-FUNCTION-SECTION: .section .text._Z3zipb,"ax",@progbits
-; LINUX-SECTIONS-NO-FUNCTION-SECTION-NOT: .section .text{{.*}}._Z3zipb,"ax",@progbits
-; LINUX-SECTIONS: _Z3zipb:
-; LINUX-SECTIONS-NOT: .section .text{{.*}}._Z3zipb.__part.{{[0-9]+}},"ax",@progbits
-; LINUX-SECTIONS-NOT: _Z3zipb.__part.{{[0-9]+}}:
diff --git a/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll b/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
index d481b14..6e0db20 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
@@ -1,6 +1,8 @@
-; RUN: echo "!foo" > %t.order.txt
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t.order.txt | FileCheck --check-prefix=SOURCE-DRIFT %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t.order.txt -bbsections-detect-source-drift=false | FileCheck --check-prefix=HASH-CHECK-DISABLED %s
+; RUN: echo "v1" > %t
+; RUN: echo "f foo" >> %t
+; RUN: echo "c 0" >> %t
+; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t | FileCheck --check-prefix=SOURCE-DRIFT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t -bbsections-detect-source-drift=false | FileCheck --check-prefix=HASH-CHECK-DISABLED %s
define dso_local i32 @foo(i1 zeroext %0, i1 zeroext %1) !annotation !1 {
br i1 %0, label %5, label %3
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 86d7df0c..fae1ff9 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -216,8 +216,8 @@ define i1 @trunc_v8i16_cmp(<8 x i16> %a0) nounwind {
define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
; SSE-LABEL: bitcast_v16i8_to_v2i8:
; SSE: # %bb.0:
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -225,8 +225,8 @@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
;
; AVX12-LABEL: bitcast_v16i8_to_v2i8:
; AVX12: # %bb.0:
-; AVX12-NEXT: vpmovmskb %xmm0, %ecx
-; AVX12-NEXT: movl %ecx, %eax
+; AVX12-NEXT: vpmovmskb %xmm0, %eax
+; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $8, %eax
; AVX12-NEXT: addb %cl, %al
; AVX12-NEXT: # kill: def $al killed $al killed $eax
@@ -441,8 +441,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: bitcast_v16i16_to_v2i8:
; SSE: # %bb.0:
; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -452,8 +452,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
-; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: addb %cl, %al
; AVX1-NEXT: # kill: def $al killed $al killed $eax
@@ -464,8 +464,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %ecx
-; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: addb %cl, %al
; AVX2-NEXT: # kill: def $al killed $al killed $eax
@@ -762,8 +762,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -776,8 +776,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
-; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: addb %cl, %al
; AVX1-NEXT: # kill: def $al killed $al killed $eax
@@ -793,8 +793,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: vpmovmskb %xmm0, %ecx
-; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: addb %cl, %al
; AVX2-NEXT: # kill: def $al killed $al killed $eax
diff --git a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
index 4d41c84..a42a715 100644
--- a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
@@ -7,8 +7,8 @@
define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0_(ptr %r) {
; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0_:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4
@@ -68,8 +68,8 @@ _ZNK4llvm5APInt13getActiveBitsEv.exit.i.i: ; preds = %for.body.i.i.i.i.i
define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0__assert(ptr %r) {
; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0__assert:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: jmp .LBB1_1
; CHECK-NEXT: .p2align 4
diff --git a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
index c1beb7c..c9c88f7 100644
--- a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
+++ b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
@@ -1031,31 +1031,30 @@ define void @simple_urem_fail_intermediate_inc(i32 %N, i32 %rem_amt) nounwind {
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je .LBB17_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $1, %r15d
+; CHECK-NEXT: movl $1, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB17_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $1, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB17_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB17_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1199,32 +1198,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add(i32 %N, i32 %rem_amt_
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB21_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: orl $16, %ebx
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB21_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB21_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB21_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1251,32 +1249,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_missing_nuw(i32
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB22_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: orl $16, %ebx
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB22_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB22_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB22_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1303,31 +1300,30 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_no_simplify_rem(
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB23_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB23_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB23_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB23_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1404,32 +1400,31 @@ define void @simple_urem_to_sel_non_zero_start_through_sub_no_simplfy(i32 %N, i3
; CHECK-NEXT: cmpl %edx, %edi
; CHECK-NEXT: jbe .LBB25_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: movl %edx, %r15d
-; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: movl %edx, %ebx
+; CHECK-NEXT: movl %esi, %ebp
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: addl $-2, %r15d
+; CHECK-NEXT: addl $-2, %ebx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB25_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebx, %eax
; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ebx
+; CHECK-NEXT: divl %ebp
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebx, %eax
+; CHECK-NEXT: incl %ebx
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $-2, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB25_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB25_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index e223765..46b2571 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -490,20 +490,21 @@ define i32 @freeze_ashr_exact(i32 %a0) nounwind {
define i32 @freeze_ashr_exact_extra_use(i32 %a0, ptr %escape) nounwind {
; X86-LABEL: freeze_ashr_exact_extra_use:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: sarl $3, %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl $3, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $6, %eax
+; X86-NEXT: movl %edx, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: freeze_ashr_exact_extra_use:
; X64: # %bb.0:
-; X64-NEXT: sarl $3, %edi
-; X64-NEXT: movl %edi, (%rsi)
; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $3, %eax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: sarl $6, %eax
+; X64-NEXT: movl %ecx, (%rsi)
; X64-NEXT: retq
%x = ashr exact i32 %a0, 3
%y = freeze i32 %x
@@ -604,20 +605,21 @@ define i32 @freeze_lshr_exact(i32 %a0) nounwind {
define i32 @freeze_lshr_exact_extra_use(i32 %a0, ptr %escape) nounwind {
; X86-LABEL: freeze_lshr_exact_extra_use:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shrl $3, %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: shrl $5, %eax
+; X86-NEXT: movl %edx, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: freeze_lshr_exact_extra_use:
; X64: # %bb.0:
-; X64-NEXT: shrl $3, %edi
-; X64-NEXT: movl %edi, (%rsi)
; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrl $5, %eax
+; X64-NEXT: movl %ecx, (%rsi)
; X64-NEXT: retq
%x = lshr exact i32 %a0, 3
%y = freeze i32 %x
diff --git a/llvm/test/CodeGen/X86/i128-mul.ll b/llvm/test/CodeGen/X86/i128-mul.ll
index cffd88c..477a0dc 100644
--- a/llvm/test/CodeGen/X86/i128-mul.ll
+++ b/llvm/test/CodeGen/X86/i128-mul.ll
@@ -111,62 +111,63 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X86-NOBMI-NEXT: orl %ecx, %eax
; X86-NOBMI-NEXT: je .LBB1_3
; X86-NOBMI-NEXT: # %bb.1: # %for.body.preheader
-; X86-NOBMI-NEXT: xorl %eax, %eax
-; X86-NOBMI-NEXT: xorl %edx, %edx
+; X86-NOBMI-NEXT: xorl %esi, %esi
; X86-NOBMI-NEXT: xorl %ecx, %ecx
-; X86-NOBMI-NEXT: movl $0, (%esp) # 4-byte Folded Spill
+; X86-NOBMI-NEXT: xorl %edi, %edi
+; X86-NOBMI-NEXT: xorl %ebp, %ebp
; X86-NOBMI-NEXT: .p2align 4
; X86-NOBMI-NEXT: .LBB1_2: # %for.body
; X86-NOBMI-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NOBMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NOBMI-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NOBMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT: movl (%eax,%ecx,8), %edi
-; X86-NOBMI-NEXT: movl 4(%eax,%ecx,8), %ebx
+; X86-NOBMI-NEXT: movl (%eax,%edi,8), %ebp
+; X86-NOBMI-NEXT: movl 4(%eax,%edi,8), %ebx
; X86-NOBMI-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NOBMI-NEXT: movl %edi, %eax
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: mull %esi
-; X86-NOBMI-NEXT: movl %edx, %ebp
+; X86-NOBMI-NEXT: movl %ebp, %eax
+; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl %ebx, %eax
-; X86-NOBMI-NEXT: mull %esi
-; X86-NOBMI-NEXT: movl %edx, %ebx
-; X86-NOBMI-NEXT: movl %eax, %esi
-; X86-NOBMI-NEXT: addl %ebp, %esi
-; X86-NOBMI-NEXT: adcl $0, %ebx
-; X86-NOBMI-NEXT: movl %edi, %eax
+; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %eax, %ebx
+; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: adcl $0, %edx
+; X86-NOBMI-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NOBMI-NEXT: movl %ebp, %eax
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOBMI-NEXT: mull %edx
-; X86-NOBMI-NEXT: movl %edx, %ebp
-; X86-NOBMI-NEXT: movl %eax, %edi
-; X86-NOBMI-NEXT: addl %esi, %edi
-; X86-NOBMI-NEXT: adcl %ebx, %ebp
-; X86-NOBMI-NEXT: setb %bl
+; X86-NOBMI-NEXT: movl %eax, %ebp
+; X86-NOBMI-NEXT: addl %ebx, %ebp
+; X86-NOBMI-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NOBMI-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: movl %edx, %ebx
+; X86-NOBMI-NEXT: setb (%esp) # 1-byte Folded Spill
; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: addl %ebp, %eax
-; X86-NOBMI-NEXT: movzbl %bl, %esi
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NOBMI-NEXT: adcl %esi, %edx
-; X86-NOBMI-NEXT: movl %ecx, %ebx
-; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NOBMI-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NOBMI-NEXT: adcl $0, %eax
-; X86-NOBMI-NEXT: adcl $0, %edx
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: movl %ecx, (%esi,%ebx,8)
-; X86-NOBMI-NEXT: movl %ebx, %ecx
-; X86-NOBMI-NEXT: movl %edi, 4(%esi,%ebx,8)
-; X86-NOBMI-NEXT: addl $1, %ecx
-; X86-NOBMI-NEXT: movl (%esp), %edi # 4-byte Reload
-; X86-NOBMI-NEXT: adcl $0, %edi
-; X86-NOBMI-NEXT: movl %ecx, %esi
-; X86-NOBMI-NEXT: xorl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NOBMI-NEXT: xorl %ebp, %edi
-; X86-NOBMI-NEXT: orl %esi, %edi
+; X86-NOBMI-NEXT: movl %eax, %esi
+; X86-NOBMI-NEXT: addl %ebx, %esi
+; X86-NOBMI-NEXT: movl %ecx, %eax
+; X86-NOBMI-NEXT: movzbl (%esp), %ebx # 1-byte Folded Reload
+; X86-NOBMI-NEXT: movl %edx, %ecx
+; X86-NOBMI-NEXT: adcl %ebx, %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: adcl %eax, %ebp
+; X86-NOBMI-NEXT: adcl $0, %esi
+; X86-NOBMI-NEXT: adcl $0, %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT: movl %edx, (%eax,%edi,8)
+; X86-NOBMI-NEXT: movl %ebp, 4(%eax,%edi,8)
+; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT: addl $1, %edi
+; X86-NOBMI-NEXT: adcl $0, %ebp
+; X86-NOBMI-NEXT: movl %edi, %eax
+; X86-NOBMI-NEXT: xorl %edx, %eax
+; X86-NOBMI-NEXT: movl %ebp, %edx
+; X86-NOBMI-NEXT: xorl %ebx, %edx
+; X86-NOBMI-NEXT: orl %eax, %edx
; X86-NOBMI-NEXT: jne .LBB1_2
; X86-NOBMI-NEXT: .LBB1_3: # %for.end
; X86-NOBMI-NEXT: xorl %eax, %eax
@@ -184,71 +185,66 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X86-BMI-NEXT: pushl %ebx
; X86-BMI-NEXT: pushl %edi
; X86-BMI-NEXT: pushl %esi
-; X86-BMI-NEXT: subl $20, %esp
+; X86-BMI-NEXT: subl $16, %esp
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI-NEXT: orl %ecx, %eax
; X86-BMI-NEXT: je .LBB1_3
; X86-BMI-NEXT: # %bb.1: # %for.body.preheader
-; X86-BMI-NEXT: xorl %ecx, %ecx
-; X86-BMI-NEXT: xorl %eax, %eax
+; X86-BMI-NEXT: xorl %esi, %esi
+; X86-BMI-NEXT: xorl %edi, %edi
; X86-BMI-NEXT: xorl %ebx, %ebx
-; X86-BMI-NEXT: xorl %ebp, %ebp
+; X86-BMI-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-BMI-NEXT: .p2align 4
; X86-BMI-NEXT: .LBB1_2: # %for.body
; X86-BMI-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-BMI-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-BMI-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-BMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI-NEXT: movl (%eax,%ebx,8), %ecx
-; X86-BMI-NEXT: movl 4(%eax,%ebx,8), %esi
-; X86-BMI-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-BMI-NEXT: movl 4(%eax,%ebx,8), %ebp
+; X86-BMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-BMI-NEXT: movl %ecx, %edx
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-BMI-NEXT: mulxl %eax, %edx, %edi
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %edx, %eax
+; X86-BMI-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-BMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-BMI-NEXT: movl %esi, %edx
-; X86-BMI-NEXT: mulxl %eax, %esi, %eax
-; X86-BMI-NEXT: addl %edi, %esi
-; X86-BMI-NEXT: adcl $0, %eax
+; X86-BMI-NEXT: movl %ebp, %edx
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %eax, %ebp
+; X86-BMI-NEXT: addl (%esp), %eax # 4-byte Folded Reload
+; X86-BMI-NEXT: adcl $0, %ebp
; X86-BMI-NEXT: movl %ecx, %edx
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI-NEXT: mulxl %ecx, %edi, %ebp
-; X86-BMI-NEXT: addl %esi, %edi
-; X86-BMI-NEXT: adcl %eax, %ebp
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %ecx, %edx
+; X86-BMI-NEXT: addl %eax, %ecx
+; X86-BMI-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-BMI-NEXT: movl %esi, %eax
+; X86-BMI-NEXT: adcl %ebp, %edx
+; X86-BMI-NEXT: movl %edx, %ebp
; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-BMI-NEXT: mulxl %ecx, %ecx, %eax
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %esi, %edi
; X86-BMI-NEXT: setb %dl
-; X86-BMI-NEXT: addl %ebp, %ecx
-; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI-NEXT: addl %ebp, %esi
; X86-BMI-NEXT: movzbl %dl, %edx
-; X86-BMI-NEXT: adcl %edx, %eax
-; X86-BMI-NEXT: movl %eax, %edx
-; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-BMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-BMI-NEXT: adcl (%esp), %edi # 4-byte Folded Reload
-; X86-BMI-NEXT: adcl $0, %ecx
-; X86-BMI-NEXT: adcl $0, %edx
-; X86-BMI-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-BMI-NEXT: movl %eax, (%edx,%ebx,8)
-; X86-BMI-NEXT: movl %edi, 4(%edx,%ebx,8)
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-BMI-NEXT: adcl %edx, %edi
+; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-BMI-NEXT: addl %eax, %edx
+; X86-BMI-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X86-BMI-NEXT: adcl $0, %esi
+; X86-BMI-NEXT: adcl $0, %edi
+; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-BMI-NEXT: movl %edx, (%eax,%ebx,8)
+; X86-BMI-NEXT: movl %ecx, 4(%eax,%ebx,8)
; X86-BMI-NEXT: addl $1, %ebx
-; X86-BMI-NEXT: adcl $0, %ebp
-; X86-BMI-NEXT: movl %ebx, %edx
-; X86-BMI-NEXT: xorl %esi, %edx
-; X86-BMI-NEXT: movl %ebp, %esi
-; X86-BMI-NEXT: xorl %edi, %esi
-; X86-BMI-NEXT: orl %edx, %esi
-; X86-BMI-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-BMI-NEXT: adcl $0, %ecx
+; X86-BMI-NEXT: movl %ebx, %eax
+; X86-BMI-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-BMI-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-BMI-NEXT: xorl %ebp, %ecx
+; X86-BMI-NEXT: orl %eax, %ecx
; X86-BMI-NEXT: jne .LBB1_2
; X86-BMI-NEXT: .LBB1_3: # %for.end
; X86-BMI-NEXT: xorl %eax, %eax
; X86-BMI-NEXT: xorl %edx, %edx
-; X86-BMI-NEXT: addl $20, %esp
+; X86-BMI-NEXT: addl $16, %esp
; X86-BMI-NEXT: popl %esi
; X86-BMI-NEXT: popl %edi
; X86-BMI-NEXT: popl %ebx
@@ -261,11 +257,12 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-NOBMI-NEXT: je .LBB1_3
; X64-NOBMI-NEXT: # %bb.1: # %for.body.preheader
; X64-NOBMI-NEXT: movq %rdx, %r8
-; X64-NOBMI-NEXT: xorl %r10d, %r10d
+; X64-NOBMI-NEXT: xorl %edx, %edx
; X64-NOBMI-NEXT: xorl %r9d, %r9d
; X64-NOBMI-NEXT: .p2align 4
; X64-NOBMI-NEXT: .LBB1_2: # %for.body
; X64-NOBMI-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NOBMI-NEXT: movq %rdx, %r10
; X64-NOBMI-NEXT: movq %rcx, %rax
; X64-NOBMI-NEXT: mulq (%r8,%r9,8)
; X64-NOBMI-NEXT: addq %r10, %rax
@@ -273,7 +270,6 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-NOBMI-NEXT: movq %rax, (%rsi,%r9,8)
; X64-NOBMI-NEXT: incq %r9
; X64-NOBMI-NEXT: cmpq %r9, %rdi
-; X64-NOBMI-NEXT: movq %rdx, %r10
; X64-NOBMI-NEXT: jne .LBB1_2
; X64-NOBMI-NEXT: .LBB1_3: # %for.end
; X64-NOBMI-NEXT: xorl %eax, %eax
@@ -285,11 +281,12 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-BMI-NEXT: je .LBB1_3
; X64-BMI-NEXT: # %bb.1: # %for.body.preheader
; X64-BMI-NEXT: movq %rdx, %rax
-; X64-BMI-NEXT: xorl %r9d, %r9d
+; X64-BMI-NEXT: xorl %edx, %edx
; X64-BMI-NEXT: xorl %r8d, %r8d
; X64-BMI-NEXT: .p2align 4
; X64-BMI-NEXT: .LBB1_2: # %for.body
; X64-BMI-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-BMI-NEXT: movq %rdx, %r9
; X64-BMI-NEXT: movq %rcx, %rdx
; X64-BMI-NEXT: mulxq (%rax,%r8,8), %r10, %rdx
; X64-BMI-NEXT: addq %r9, %r10
@@ -297,7 +294,6 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-BMI-NEXT: movq %r10, (%rsi,%r8,8)
; X64-BMI-NEXT: incq %r8
; X64-BMI-NEXT: cmpq %r8, %rdi
-; X64-BMI-NEXT: movq %rdx, %r9
; X64-BMI-NEXT: jne .LBB1_2
; X64-BMI-NEXT: .LBB1_3: # %for.end
; X64-BMI-NEXT: xorl %eax, %eax
diff --git a/llvm/test/CodeGen/X86/icmp-abs-C.ll b/llvm/test/CodeGen/X86/icmp-abs-C.ll
index 53b70fa..c98889b 100644
--- a/llvm/test/CodeGen/X86/icmp-abs-C.ll
+++ b/llvm/test/CodeGen/X86/icmp-abs-C.ll
@@ -161,22 +161,22 @@ define i16 @ne_and_with_dom_abs(i16 %x) nounwind {
; X86-LABEL: ne_and_with_dom_abs:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movswl %cx, %eax
-; X86-NEXT: sarl $15, %eax
-; X86-NEXT: xorl %eax, %ecx
-; X86-NEXT: subl %eax, %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movswl %ax, %ecx
+; X86-NEXT: sarl $15, %ecx
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: xorl $12312, %eax # imm = 0x3018
; X86-NEXT: movzwl %ax, %esi
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: cmpw $64, %cx
-; X86-NEXT: setne %cl
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpw $64, %dx
+; X86-NEXT: setne %dl
; X86-NEXT: cmpl $2345, %esi # imm = 0x929
; X86-NEXT: jae .LBB3_2
; X86-NEXT: # %bb.1:
-; X86-NEXT: movb %cl, %dl
-; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movb %dl, %cl
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB3_2:
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/llvm.sincospi.ll b/llvm/test/CodeGen/X86/llvm.sincospi.ll
new file mode 100644
index 0000000..5546c66
--- /dev/null
+++ b/llvm/test/CodeGen/X86/llvm.sincospi.ll
@@ -0,0 +1,233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=x86_64-apple-macosx10.9 < %s | FileCheck %s
+
+define { half, half } @test_sincospi_f16(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm1
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_sin:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: movq %rsp, %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_cos:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f16:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $64, %rsp
+; CHECK-NEXT: pextrw $0, %xmm0, %ebx
+; CHECK-NEXT: psrld $16, %xmm0
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movzwl %bx, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
+; CHECK-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
+; CHECK-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; CHECK-NEXT: addq $64, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_sincospi_f32(float %a) #0 {
+; CHECK-LABEL: test_sincospi_f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
+ %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v3f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { double, double } @test_sincospi_f64(double %a) #0 {
+; CHECK-LABEL: test_sincospi_f64:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: retq
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f64:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index caec02e..2f691e7 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -255,9 +255,9 @@ define <8 x i32> @test7(ptr %base, <8 x i32> %ind, i8 %mask) {
; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-KNL-NEXT: kmovw %k1, %k2
; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
-; X64-KNL-NEXT: vmovdqa64 %zmm1, %zmm2
-; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm2 {%k1}
-; X64-KNL-NEXT: vpaddd %ymm2, %ymm1, %ymm0
+; X64-KNL-NEXT: vmovdqa %ymm1, %ymm2
+; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
+; X64-KNL-NEXT: vpaddd %ymm1, %ymm2, %ymm0
; X64-KNL-NEXT: retq
;
; X86-KNL-LABEL: test7:
@@ -271,9 +271,9 @@ define <8 x i32> @test7(ptr %base, <8 x i32> %ind, i8 %mask) {
; X86-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-KNL-NEXT: kmovw %k1, %k2
; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k2}
-; X86-KNL-NEXT: vmovdqa64 %zmm1, %zmm2
-; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm2 {%k1}
-; X86-KNL-NEXT: vpaddd %ymm2, %ymm1, %ymm0
+; X86-KNL-NEXT: vmovdqa %ymm1, %ymm2
+; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
+; X86-KNL-NEXT: vpaddd %ymm1, %ymm2, %ymm0
; X86-KNL-NEXT: retl
;
; X64-SKX-LABEL: test7:
diff --git a/llvm/test/CodeGen/X86/midpoint-int.ll b/llvm/test/CodeGen/X86/midpoint-int.ll
index a75d42e..c058e37 100644
--- a/llvm/test/CodeGen/X86/midpoint-int.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int.ll
@@ -658,9 +658,9 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -710,9 +710,9 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setbe %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -765,9 +765,9 @@ define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzwl (%eax), %ecx
+; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -817,11 +817,11 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, ptr %a2_addr) nounwind {
; X86-LABEL: scalar_i16_signed_reg_mem:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -871,12 +871,12 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; X86-LABEL: scalar_i16_signed_mem_mem:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movzwl (%ecx), %ecx
-; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index 73d459b..8f97d26 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -403,11 +403,11 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: je .LBB3_1
; X86-NEXT: # %bb.2: # %bb26.preheader
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB3_3: # %bb26
@@ -427,7 +427,6 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X86-NEXT: jb .LBB3_3
; X86-NEXT: jmp .LBB3_4
; X86-NEXT: .LBB3_1:
-; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .LBB3_4: # %bb31
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index b1aa789..a663f6a 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -715,8 +715,8 @@ define i16 @test_mul_by_66(i16 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $6, %eax
-; X64-NEXT: leal (%rax,%rdi,2), %eax
+; X64-NEXT: shll $6, %edi
+; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 66
@@ -757,8 +757,8 @@ define i16 @test_mul_by_520(i16 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $9, %eax
-; X64-NEXT: leal (%rax,%rdi,8), %eax
+; X64-NEXT: shll $9, %edi
+; X64-NEXT: leal (%rdi,%rax,8), %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 520
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 79889b9..4129b44 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -1155,16 +1155,16 @@ define i32 @test_mul_by_66(i32 %x) {
; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: movl %edi, %eax
-; X64-HSW-NEXT: shll $6, %eax
-; X64-HSW-NEXT: leal (%rax,%rdi,2), %eax
+; X64-HSW-NEXT: shll $6, %edi
+; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax
; X64-HSW-NEXT: retq
;
; X64-JAG-LABEL: test_mul_by_66:
; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: movl %edi, %eax
-; X64-JAG-NEXT: shll $6, %eax
-; X64-JAG-NEXT: leal (%rax,%rdi,2), %eax
+; X64-JAG-NEXT: shll $6, %edi
+; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax
; X64-JAG-NEXT: retq
;
; X86-NOOPT-LABEL: test_mul_by_66:
@@ -1241,16 +1241,16 @@ define i32 @test_mul_by_520(i32 %x) {
; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: movl %edi, %eax
-; X64-HSW-NEXT: shll $9, %eax
-; X64-HSW-NEXT: leal (%rax,%rdi,8), %eax
+; X64-HSW-NEXT: shll $9, %edi
+; X64-HSW-NEXT: leal (%rdi,%rax,8), %eax
; X64-HSW-NEXT: retq
;
; X64-JAG-LABEL: test_mul_by_520:
; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: movl %edi, %eax
-; X64-JAG-NEXT: shll $9, %eax
-; X64-JAG-NEXT: leal (%rax,%rdi,8), %eax
+; X64-JAG-NEXT: shll $9, %edi
+; X64-JAG-NEXT: leal (%rdi,%rax,8), %eax
; X64-JAG-NEXT: retq
;
; X86-NOOPT-LABEL: test_mul_by_520:
diff --git a/llvm/test/CodeGen/X86/mul-constant-i8.ll b/llvm/test/CodeGen/X86/mul-constant-i8.ll
index a4fa1ee..b488653 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i8.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i8.ll
@@ -425,8 +425,8 @@ define i8 @test_mul_by_66(i8 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $6, %eax
-; X64-NEXT: leal (%rax,%rdi,2), %eax
+; X64-NEXT: shll $6, %edi
+; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%m = mul i8 %x, 66
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 283c00e..b6af7e1 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -16,65 +16,65 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: imull %ebp, %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: imull %esi, %eax
; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %ecx, (%esp) ## 4-byte Spill
+; CHECK-NEXT: movl %eax, (%esp) ## 4-byte Spill
; CHECK-NEXT: je LBB0_19
; CHECK-NEXT: ## %bb.1: ## %bb10.preheader
-; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: sarl $31, %eax
-; CHECK-NEXT: shrl $30, %eax
-; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: sarl $2, %eax
-; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: movl %eax, %ebp
+; CHECK-NEXT: sarl $31, %ebp
+; CHECK-NEXT: shrl $30, %ebp
+; CHECK-NEXT: addl %eax, %ebp
+; CHECK-NEXT: sarl $2, %ebp
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_12
; CHECK-NEXT: ## %bb.2: ## %bb.nph9
-; CHECK-NEXT: testl %ebp, %ebp
+; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: jle LBB0_12
; CHECK-NEXT: ## %bb.3: ## %bb.nph9.split
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incl %eax
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: movl %edi, %edx
+; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_4: ## %bb6
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movzbl (%eax,%esi,2), %ebx
-; CHECK-NEXT: movb %bl, (%edx,%esi)
-; CHECK-NEXT: incl %esi
-; CHECK-NEXT: cmpl %ebp, %esi
+; CHECK-NEXT: movzbl (%eax,%edi,2), %ebx
+; CHECK-NEXT: movb %bl, (%edx,%edi)
+; CHECK-NEXT: incl %edi
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jl LBB0_4
; CHECK-NEXT: ## %bb.5: ## %bb9
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl %ebp, %edx
-; CHECK-NEXT: cmpl %edi, %ecx
+; CHECK-NEXT: addl %esi, %edx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: je LBB0_12
; CHECK-NEXT: ## %bb.6: ## %bb7.preheader
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: jmp LBB0_4
; CHECK-NEXT: LBB0_12: ## %bb18.loopexit
+; CHECK-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: movl (%esp), %eax ## 4-byte Reload
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx ## 4-byte Reload
-; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %ebp, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: jle LBB0_13
; CHECK-NEXT: ## %bb.7: ## %bb.nph5
-; CHECK-NEXT: cmpl $2, %ebp
+; CHECK-NEXT: cmpl $2, %esi
; CHECK-NEXT: jl LBB0_13
; CHECK-NEXT: ## %bb.8: ## %bb.nph5.split
-; CHECK-NEXT: movl %ebp, %edx
-; CHECK-NEXT: shrl $31, %edx
-; CHECK-NEXT: addl %ebp, %edx
-; CHECK-NEXT: sarl %edx
+; CHECK-NEXT: movl %esi, %ebp
+; CHECK-NEXT: shrl $31, %ebp
+; CHECK-NEXT: addl %esi, %ebp
+; CHECK-NEXT: sarl %ebp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $31, %ecx
@@ -84,102 +84,103 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: addl $2, %esi
-; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: movl (%esp), %esi ## 4-byte Reload
-; CHECK-NEXT: addl %esi, %ecx
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
+; CHECK-NEXT: addl %edx, %ecx
; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_9: ## %bb13
; CHECK-NEXT: ## =>This Loop Header: Depth=1
; CHECK-NEXT: ## Child Loop BB0_10 Depth 2
; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: addl %edx, %edi
; CHECK-NEXT: imull {{[0-9]+}}(%esp), %edi
; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Folded Reload
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_10: ## %bb14
; CHECK-NEXT: ## Parent Loop BB0_9 Depth=1
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movzbl -2(%edi,%esi,4), %ebx
-; CHECK-NEXT: movb %bl, (%ecx,%esi)
-; CHECK-NEXT: movzbl (%edi,%esi,4), %ebx
-; CHECK-NEXT: movb %bl, (%eax,%esi)
-; CHECK-NEXT: incl %esi
-; CHECK-NEXT: cmpl %edx, %esi
+; CHECK-NEXT: movzbl -2(%edi,%ebx,4), %edx
+; CHECK-NEXT: movb %dl, (%ecx,%ebx)
+; CHECK-NEXT: movzbl (%edi,%ebx,4), %edx
+; CHECK-NEXT: movb %dl, (%eax,%ebx)
+; CHECK-NEXT: incl %ebx
+; CHECK-NEXT: cmpl %ebp, %ebx
; CHECK-NEXT: jl LBB0_10
; CHECK-NEXT: ## %bb.11: ## %bb17
; CHECK-NEXT: ## in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Reload
; CHECK-NEXT: incl %edi
-; CHECK-NEXT: addl %edx, %eax
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
-; CHECK-NEXT: addl $2, %esi
-; CHECK-NEXT: addl %edx, %ecx
+; CHECK-NEXT: addl %ebp, %eax
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Reload
+; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: addl %ebp, %ecx
; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Folded Reload
; CHECK-NEXT: jl LBB0_9
; CHECK-NEXT: LBB0_13: ## %bb20
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: cmpl $1, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl $1, %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebx
; CHECK-NEXT: je LBB0_19
; CHECK-NEXT: ## %bb.14: ## %bb20
-; CHECK-NEXT: cmpl $3, %eax
+; CHECK-NEXT: cmpl $3, %ecx
; CHECK-NEXT: jne LBB0_24
; CHECK-NEXT: ## %bb.15: ## %bb22
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Reload
-; CHECK-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp ## 4-byte Reload
+; CHECK-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_18
; CHECK-NEXT: ## %bb.16: ## %bb.nph
-; CHECK-NEXT: leal 15(%edi), %eax
+; CHECK-NEXT: leal 15(%edx), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: imull {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl %ebx, %ebx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl (%esp), %esi ## 4-byte Reload
-; CHECK-NEXT: addl %esi, %ecx
-; CHECK-NEXT: addl %ecx, %ebx
-; CHECK-NEXT: addl %eax, %edx
-; CHECK-NEXT: leal 15(%ebp), %eax
+; CHECK-NEXT: addl %ebp, %ebp
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT: addl %edi, %ecx
+; CHECK-NEXT: addl %ecx, %ebp
+; CHECK-NEXT: addl %eax, %ebx
+; CHECK-NEXT: leal 15(%esi), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_17: ## %bb23
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: subl $4, %esp
-; CHECK-NEXT: pushl %ebp
-; CHECK-NEXT: pushl %edx
+; CHECK-NEXT: pushl %esi
; CHECK-NEXT: pushl %ebx
-; CHECK-NEXT: movl %ebx, %esi
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: movl %ebp, %edi
+; CHECK-NEXT: movl %ebx, %ebp
; CHECK-NEXT: movl %edx, %ebx
; CHECK-NEXT: calll _memcpy
; CHECK-NEXT: movl %ebx, %edx
-; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: movl %ebp, %ebx
+; CHECK-NEXT: movl %edi, %ebp
; CHECK-NEXT: addl $16, %esp
-; CHECK-NEXT: addl %ebp, %ebx
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
-; CHECK-NEXT: decl %edi
+; CHECK-NEXT: addl %esi, %ebp
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: decl %edx
; CHECK-NEXT: jne LBB0_17
; CHECK-NEXT: LBB0_18: ## %bb26
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
-; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl %edx, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
+; CHECK-NEXT: addl %ecx, %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl %esi, %edx
; CHECK-NEXT: jmp LBB0_23
; CHECK-NEXT: LBB0_19: ## %bb29
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_22
; CHECK-NEXT: ## %bb.20: ## %bb.nph11
-; CHECK-NEXT: movl %edi, %esi
-; CHECK-NEXT: leal 15(%ebp), %eax
+; CHECK-NEXT: leal 15(%esi), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
@@ -187,30 +188,32 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: LBB0_21: ## %bb30
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: subl $4, %esp
-; CHECK-NEXT: pushl %ebp
-; CHECK-NEXT: pushl %edx
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: movl %ebx, %ebp
; CHECK-NEXT: movl %edx, %ebx
; CHECK-NEXT: calll _memcpy
; CHECK-NEXT: movl %ebx, %edx
+; CHECK-NEXT: movl %ebp, %ebx
; CHECK-NEXT: addl $16, %esp
-; CHECK-NEXT: addl %ebp, %edi
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
-; CHECK-NEXT: decl %esi
+; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: decl %edx
; CHECK-NEXT: jne LBB0_21
; CHECK-NEXT: LBB0_22: ## %bb33
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl %edx, %ecx
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl %ecx, %edx
; CHECK-NEXT: LBB0_23: ## %bb33
-; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: shrl $31, %eax
-; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: sarl %eax
; CHECK-NEXT: subl $4, %esp
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: pushl $128
-; CHECK-NEXT: pushl %ecx
+; CHECK-NEXT: pushl %edx
; CHECK-NEXT: calll _memset
; CHECK-NEXT: addl $44, %esp
; CHECK-NEXT: LBB0_25: ## %return
@@ -523,38 +526,38 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Reload
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB1_9: ## %bb13
; CHECK-NEXT: ## =>This Loop Header: Depth=1
; CHECK-NEXT: ## Child Loop BB1_10 Depth 2
-; CHECK-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: andl $1, %ebx
; CHECK-NEXT: movl %edx, (%esp) ## 4-byte Spill
-; CHECK-NEXT: addl %edx, %ebx
-; CHECK-NEXT: imull {{[0-9]+}}(%esp), %ebx
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: addl %esi, %edx
+; CHECK-NEXT: imull {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB1_10: ## %bb14
; CHECK-NEXT: ## Parent Loop BB1_9 Depth=1
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movzbl -2(%ebx,%esi,4), %edx
-; CHECK-NEXT: movb %dl, (%eax,%esi)
-; CHECK-NEXT: movzbl (%ebx,%esi,4), %edx
-; CHECK-NEXT: movb %dl, (%ecx,%esi)
+; CHECK-NEXT: movzbl -2(%edx,%esi,4), %ebx
+; CHECK-NEXT: movb %bl, (%eax,%esi)
+; CHECK-NEXT: movzbl (%edx,%esi,4), %ebx
+; CHECK-NEXT: movb %bl, (%ecx,%esi)
; CHECK-NEXT: incl %esi
; CHECK-NEXT: cmpl %ebp, %esi
; CHECK-NEXT: jb LBB1_10
; CHECK-NEXT: ## %bb.11: ## %bb17
; CHECK-NEXT: ## in Loop: Header=BB1_9 Depth=1
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Reload
-; CHECK-NEXT: incl %ebx
-; CHECK-NEXT: addl %ebp, %ecx
; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: incl %edx
+; CHECK-NEXT: addl %ebp, %ecx
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
+; CHECK-NEXT: addl $2, %esi
; CHECK-NEXT: addl %ebp, %eax
-; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
; CHECK-NEXT: jb LBB1_9
; CHECK-NEXT: LBB1_13: ## %bb20
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
diff --git a/llvm/test/CodeGen/X86/parity.ll b/llvm/test/CodeGen/X86/parity.ll
index 420f5ba..31a7f11 100644
--- a/llvm/test/CodeGen/X86/parity.ll
+++ b/llvm/test/CodeGen/X86/parity.ll
@@ -219,12 +219,12 @@ define i64 @parity_64(i64 %x) {
;
; X64-NOPOPCNT-LABEL: parity_64:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
@@ -264,12 +264,12 @@ define i32 @parity_64_trunc(i64 %x) {
;
; X64-NOPOPCNT-LABEL: parity_64_trunc:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
@@ -628,12 +628,12 @@ define i64 @parity_64_shift(i64 %0) {
;
; X64-NOPOPCNT-LABEL: parity_64_shift:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
diff --git a/llvm/test/CodeGen/X86/pr166744.ll b/llvm/test/CodeGen/X86/pr166744.ll
index 21b25d8..ffdb68c 100644
--- a/llvm/test/CodeGen/X86/pr166744.ll
+++ b/llvm/test/CodeGen/X86/pr166744.ll
@@ -31,13 +31,13 @@ define i1 @PR166744(ptr %v, i64 %idx, i1 zeroext %b) {
; NOPOSTRA-LABEL: PR166744:
; NOPOSTRA: # %bb.0:
; NOPOSTRA-NEXT: movl %esi, %eax
-; NOPOSTRA-NEXT: shrl $3, %eax
-; NOPOSTRA-NEXT: andl $60, %eax
-; NOPOSTRA-NEXT: movl (%rdi,%rax), %ecx
-; NOPOSTRA-NEXT: btrl %esi, %ecx
-; NOPOSTRA-NEXT: shlxl %esi, %edx, %edx
-; NOPOSTRA-NEXT: orl %ecx, %edx
-; NOPOSTRA-NEXT: movl %edx, (%rdi,%rax)
+; NOPOSTRA-NEXT: shrl $3, %esi
+; NOPOSTRA-NEXT: andl $60, %esi
+; NOPOSTRA-NEXT: movl (%rdi,%rsi), %ecx
+; NOPOSTRA-NEXT: btrl %eax, %ecx
+; NOPOSTRA-NEXT: shlxl %eax, %edx, %eax
+; NOPOSTRA-NEXT: orl %ecx, %eax
+; NOPOSTRA-NEXT: movl %eax, (%rdi,%rsi)
; NOPOSTRA-NEXT: movq 16(%rdi), %rax
; NOPOSTRA-NEXT: movq (%rdi), %rcx
; NOPOSTRA-NEXT: movq 8(%rdi), %rdx
diff --git a/llvm/test/CodeGen/X86/rotate-extract.ll b/llvm/test/CodeGen/X86/rotate-extract.ll
index 8f046a4..26e6886 100644
--- a/llvm/test/CodeGen/X86/rotate-extract.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract.ll
@@ -203,10 +203,10 @@ define i16 @no_extract_mul(i16 %i) nounwind {
; X64-LABEL: no_extract_mul:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
-; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: def $edi killed $edi killed $rdi def $rdi
+; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $8, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %ecx
+; X64-NEXT: leal (%rax,%rax,8), %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: shrl $9, %eax
; X64-NEXT: orl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/smul_fix.ll b/llvm/test/CodeGen/X86/smul_fix.ll
index ce56283..8cb0327 100644
--- a/llvm/test/CodeGen/X86/smul_fix.ll
+++ b/llvm/test/CodeGen/X86/smul_fix.ll
@@ -10,10 +10,10 @@ declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
define i32 @func(i32 %x, i32 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
-; X64-NEXT: movslq %esi, %rax
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq %rax, %rcx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movslq %esi, %rcx
+; X64-NEXT: movslq %edi, %rax
+; X64-NEXT: imulq %rcx, %rax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrq $32, %rax
; X64-NEXT: shldl $30, %ecx, %eax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
diff --git a/llvm/test/CodeGen/X86/sshl_sat.ll b/llvm/test/CodeGen/X86/sshl_sat.ll
index e5ea911..a93be22 100644
--- a/llvm/test/CodeGen/X86/sshl_sat.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat.ll
@@ -15,16 +15,16 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movl %edi, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movswl %dx, %esi
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movswl %di, %esi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %esi
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: testw %di, %di
+; X64-NEXT: testw %dx, %dx
; X64-NEXT: sets %al
; X64-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X64-NEXT: cmpw %si, %di
-; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: cmpw %si, %dx
+; X64-NEXT: cmovel %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
@@ -33,17 +33,17 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movswl %dx, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %dx, %dx
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %al
; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %dx
-; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %edx, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -58,18 +58,18 @@ define i16 @func2(i8 %x, i8 %y) nounwind {
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: addl %eax, %eax
-; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: movl %eax, %edx
+; X64-NEXT: xorl %esi, %esi
; X64-NEXT: testw %ax, %ax
-; X64-NEXT: sets %dl
-; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
-; X64-NEXT: movl %eax, %esi
-; X64-NEXT: shll %cl, %esi
-; X64-NEXT: movswl %si, %edi
+; X64-NEXT: sets %sil
+; X64-NEXT: addl $32767, %esi # imm = 0x7FFF
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movswl %ax, %edi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %edi
-; X64-NEXT: cmpw %di, %ax
-; X64-NEXT: cmovnel %edx, %esi
-; X64-NEXT: movswl %si, %eax
+; X64-NEXT: cmpw %di, %dx
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: cwtl
; X64-NEXT: shrl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
index 10dee14..ff76707 100644
--- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
@@ -365,119 +365,118 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movswl %bx, %ebp
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movswl %di, %ebp
; X86-NEXT: sarl %cl, %ebp
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %di, %di
+; X86-NEXT: testw %bx, %bx
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %bp, %di
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmovel %ebx, %ecx
+; X86-NEXT: cmpw %bp, %bx
+; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: cmovel %edi, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movswl %di, %ebx
-; X86-NEXT: sarl %cl, %ebx
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %si, %si
-; X86-NEXT: sets %al
-; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %bx, %si
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovel %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edx, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: movswl %si, %edi
; X86-NEXT: sarl %cl, %edi
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %dx, %dx
-; X86-NEXT: sets %al
-; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %dx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: testw %bx, %bx
+; X86-NEXT: sets %cl
+; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
+; X86-NEXT: movl %ecx, %ebp
+; X86-NEXT: cmpw %di, %bx
+; X86-NEXT: movl %edx, %edi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovel %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, %edx
+; X86-NEXT: cmovel %esi, %ebp
; X86-NEXT: shll %cl, %edx
; X86-NEXT: movswl %dx, %esi
; X86-NEXT: sarl %cl, %esi
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %di, %di
; X86-NEXT: sets %bl
; X86-NEXT: addl $32767, %ebx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpw %si, %di
+; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmovel %edx, %ebx
-; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edx
+; X86-NEXT: sarl %cl, %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: testw %si, %si
+; X86-NEXT: sets %cl
+; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
+; X86-NEXT: cmpw %dx, %si
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: cmovel %eax, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movswl %dx, %esi
-; X86-NEXT: sarl %cl, %esi
+; X86-NEXT: movswl %dx, %eax
+; X86-NEXT: sarl %cl, %eax
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
+; X86-NEXT: cmpw %ax, %si
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmovel %edx, %ecx
-; X86-NEXT: movl %ecx, %ebp
+; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NEXT: movl %eax, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movswl %dx, %esi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %esi
; X86-NEXT: sarl %cl, %esi
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %dx, %dx
; X86-NEXT: sets %bl
; X86-NEXT: addl $32767, %ebx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
-; X86-NEXT: cmovel %edx, %ebx
+; X86-NEXT: cmpw %si, %dx
+; X86-NEXT: cmovel %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %dl
; X86-NEXT: addl $32767, %edx # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: cmovel %esi, %edx
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %eax, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: cmovel %esi, %ecx
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw %cx, 14(%eax)
; X86-NEXT: movw %dx, 12(%eax)
; X86-NEXT: movw %bx, 10(%eax)
-; X86-NEXT: movw %bp, 8(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NEXT: movw %cx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, 6(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, 4(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movw %cx, 2(%eax)
+; X86-NEXT: movw %bp, 2(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: addl $16, %esp
diff --git a/llvm/test/CodeGen/X86/stackmap.ll b/llvm/test/CodeGen/X86/stackmap.ll
index 72406aa..9bf88cb 100644
--- a/llvm/test/CodeGen/X86/stackmap.ll
+++ b/llvm/test/CodeGen/X86/stackmap.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -terminal-rule=0 | FileCheck %s
;
; Note: Print verbose stackmaps using -debug-only=stackmaps.
+; FIXME: The test should be fixed to produce the correctly sized spill with
+; the -terminal-rule=0 flag removed.
+
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -546,8 +549,8 @@ define void @clobberScratch(i32 %a) {
ret void
}
-; A stack frame which needs to be realigned at runtime (to meet alignment
-; criteria for values on the stack) does not have a fixed frame size.
+; A stack frame which needs to be realigned at runtime (to meet alignment
+; criteria for values on the stack) does not have a fixed frame size.
; CHECK-LABEL: .long L{{.*}}-_needsStackRealignment
; CHECK-NEXT: .short 0
; 0 locations
diff --git a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
index 5bd624c..01fbafb 100644
--- a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
+++ b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
@@ -2429,126 +2429,126 @@ define void @vec384_v3i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.p
; SSE2-ONLY: # %bb.0:
; SSE2-ONLY-NEXT: movl (%rdi), %eax
; SSE2-ONLY-NEXT: notl %eax
-; SSE2-ONLY-NEXT: movw %ax, (%rsi)
; SSE2-ONLY-NEXT: movl %eax, %ecx
-; SSE2-ONLY-NEXT: shrl $16, %ecx
-; SSE2-ONLY-NEXT: movb %cl, 2(%rsi)
-; SSE2-ONLY-NEXT: movb %cl, 2(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, (%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 6(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 4(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 10(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 8(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 14(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 12(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 18(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 16(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 22(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 20(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 26(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 24(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 30(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 28(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 34(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 32(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 38(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 36(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 42(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 40(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 46(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 44(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 50(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 48(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 54(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 52(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 58(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 56(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 62(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 60(%rdx)
+; SSE2-ONLY-NEXT: movw %ax, (%rsi)
+; SSE2-ONLY-NEXT: shrl $16, %eax
+; SSE2-ONLY-NEXT: movb %al, 2(%rsi)
+; SSE2-ONLY-NEXT: movb %al, 2(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, (%rdx)
+; SSE2-ONLY-NEXT: movb %al, 6(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 4(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 10(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 8(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 14(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 12(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 18(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 16(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 22(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 20(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 26(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 24(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 30(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 28(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 34(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 32(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 38(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 36(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 42(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 40(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 46(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 44(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 50(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 48(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 54(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 52(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 58(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 56(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 62(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 60(%rdx)
; SSE2-ONLY-NEXT: retq
;
; SSE3-LABEL: vec384_v3i8:
; SSE3: # %bb.0:
; SSE3-NEXT: movl (%rdi), %eax
; SSE3-NEXT: notl %eax
-; SSE3-NEXT: movw %ax, (%rsi)
; SSE3-NEXT: movl %eax, %ecx
-; SSE3-NEXT: shrl $16, %ecx
-; SSE3-NEXT: movb %cl, 2(%rsi)
-; SSE3-NEXT: movb %cl, 2(%rdx)
-; SSE3-NEXT: movw %ax, (%rdx)
-; SSE3-NEXT: movb %cl, 6(%rdx)
-; SSE3-NEXT: movw %ax, 4(%rdx)
-; SSE3-NEXT: movb %cl, 10(%rdx)
-; SSE3-NEXT: movw %ax, 8(%rdx)
-; SSE3-NEXT: movb %cl, 14(%rdx)
-; SSE3-NEXT: movw %ax, 12(%rdx)
-; SSE3-NEXT: movb %cl, 18(%rdx)
-; SSE3-NEXT: movw %ax, 16(%rdx)
-; SSE3-NEXT: movb %cl, 22(%rdx)
-; SSE3-NEXT: movw %ax, 20(%rdx)
-; SSE3-NEXT: movb %cl, 26(%rdx)
-; SSE3-NEXT: movw %ax, 24(%rdx)
-; SSE3-NEXT: movb %cl, 30(%rdx)
-; SSE3-NEXT: movw %ax, 28(%rdx)
-; SSE3-NEXT: movb %cl, 34(%rdx)
-; SSE3-NEXT: movw %ax, 32(%rdx)
-; SSE3-NEXT: movb %cl, 38(%rdx)
-; SSE3-NEXT: movw %ax, 36(%rdx)
-; SSE3-NEXT: movb %cl, 42(%rdx)
-; SSE3-NEXT: movw %ax, 40(%rdx)
-; SSE3-NEXT: movb %cl, 46(%rdx)
-; SSE3-NEXT: movw %ax, 44(%rdx)
-; SSE3-NEXT: movb %cl, 50(%rdx)
-; SSE3-NEXT: movw %ax, 48(%rdx)
-; SSE3-NEXT: movb %cl, 54(%rdx)
-; SSE3-NEXT: movw %ax, 52(%rdx)
-; SSE3-NEXT: movb %cl, 58(%rdx)
-; SSE3-NEXT: movw %ax, 56(%rdx)
-; SSE3-NEXT: movb %cl, 62(%rdx)
-; SSE3-NEXT: movw %ax, 60(%rdx)
+; SSE3-NEXT: movw %ax, (%rsi)
+; SSE3-NEXT: shrl $16, %eax
+; SSE3-NEXT: movb %al, 2(%rsi)
+; SSE3-NEXT: movb %al, 2(%rdx)
+; SSE3-NEXT: movw %cx, (%rdx)
+; SSE3-NEXT: movb %al, 6(%rdx)
+; SSE3-NEXT: movw %cx, 4(%rdx)
+; SSE3-NEXT: movb %al, 10(%rdx)
+; SSE3-NEXT: movw %cx, 8(%rdx)
+; SSE3-NEXT: movb %al, 14(%rdx)
+; SSE3-NEXT: movw %cx, 12(%rdx)
+; SSE3-NEXT: movb %al, 18(%rdx)
+; SSE3-NEXT: movw %cx, 16(%rdx)
+; SSE3-NEXT: movb %al, 22(%rdx)
+; SSE3-NEXT: movw %cx, 20(%rdx)
+; SSE3-NEXT: movb %al, 26(%rdx)
+; SSE3-NEXT: movw %cx, 24(%rdx)
+; SSE3-NEXT: movb %al, 30(%rdx)
+; SSE3-NEXT: movw %cx, 28(%rdx)
+; SSE3-NEXT: movb %al, 34(%rdx)
+; SSE3-NEXT: movw %cx, 32(%rdx)
+; SSE3-NEXT: movb %al, 38(%rdx)
+; SSE3-NEXT: movw %cx, 36(%rdx)
+; SSE3-NEXT: movb %al, 42(%rdx)
+; SSE3-NEXT: movw %cx, 40(%rdx)
+; SSE3-NEXT: movb %al, 46(%rdx)
+; SSE3-NEXT: movw %cx, 44(%rdx)
+; SSE3-NEXT: movb %al, 50(%rdx)
+; SSE3-NEXT: movw %cx, 48(%rdx)
+; SSE3-NEXT: movb %al, 54(%rdx)
+; SSE3-NEXT: movw %cx, 52(%rdx)
+; SSE3-NEXT: movb %al, 58(%rdx)
+; SSE3-NEXT: movw %cx, 56(%rdx)
+; SSE3-NEXT: movb %al, 62(%rdx)
+; SSE3-NEXT: movw %cx, 60(%rdx)
; SSE3-NEXT: retq
;
; SSSE3-ONLY-LABEL: vec384_v3i8:
; SSSE3-ONLY: # %bb.0:
; SSSE3-ONLY-NEXT: movl (%rdi), %eax
; SSSE3-ONLY-NEXT: notl %eax
-; SSSE3-ONLY-NEXT: movw %ax, (%rsi)
; SSSE3-ONLY-NEXT: movl %eax, %ecx
-; SSSE3-ONLY-NEXT: shrl $16, %ecx
-; SSSE3-ONLY-NEXT: movb %cl, 2(%rsi)
-; SSSE3-ONLY-NEXT: movb %cl, 2(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, (%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 6(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 4(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 10(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 8(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 14(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 12(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 18(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 16(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 22(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 20(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 26(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 24(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 30(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 28(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 34(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 32(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 38(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 36(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 42(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 40(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 46(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 44(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 50(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 48(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 54(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 52(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 58(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 56(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 62(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 60(%rdx)
+; SSSE3-ONLY-NEXT: movw %ax, (%rsi)
+; SSSE3-ONLY-NEXT: shrl $16, %eax
+; SSSE3-ONLY-NEXT: movb %al, 2(%rsi)
+; SSSE3-ONLY-NEXT: movb %al, 2(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, (%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 6(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 4(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 10(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 8(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 14(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 12(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 18(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 16(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 22(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 20(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 26(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 24(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 30(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 28(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 34(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 32(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 38(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 36(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 42(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 40(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 46(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 44(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 50(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 48(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 54(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 52(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 58(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 56(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 62(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 60(%rdx)
; SSSE3-ONLY-NEXT: retq
;
; SSE41-LABEL: vec384_v3i8:
diff --git a/llvm/test/CodeGen/X86/twoaddr-lea.ll b/llvm/test/CodeGen/X86/twoaddr-lea.ll
index f20b777..3ad3e9a 100644
--- a/llvm/test/CodeGen/X86/twoaddr-lea.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-lea.ll
@@ -65,10 +65,10 @@ entry:
define void @ham() {
; CHECK-LABEL: ham:
; CHECK: ## %bb.0: ## %bb
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movq _global@GOTPCREL(%rip), %rdx
; CHECK-NEXT: movq _global2@GOTPCREL(%rip), %rsi
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: je LBB3_2
; CHECK-NEXT: .p2align 4
diff --git a/llvm/test/CodeGen/X86/umul_fix.ll b/llvm/test/CodeGen/X86/umul_fix.ll
index eacc714..5a68484 100644
--- a/llvm/test/CodeGen/X86/umul_fix.ll
+++ b/llvm/test/CodeGen/X86/umul_fix.ll
@@ -10,10 +10,10 @@ declare <4 x i32> @llvm.umul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
define i32 @func(i32 %x, i32 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: movl %edi, %ecx
-; X64-NEXT: imulq %rax, %rcx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: imulq %rcx, %rax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrq $32, %rax
; X64-NEXT: shldl $30, %ecx, %eax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
diff --git a/llvm/test/CodeGen/X86/ushl_sat.ll b/llvm/test/CodeGen/X86/ushl_sat.ll
index e0e1ef7..9768e47 100644
--- a/llvm/test/CodeGen/X86/ushl_sat.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat.ll
@@ -14,23 +14,23 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: movl %edi, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movzwl %dx, %eax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movzwl %di, %edx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-NEXT: shrl %cl, %eax
-; X64-NEXT: cmpw %ax, %di
+; X64-NEXT: shrl %cl, %edx
+; X64-NEXT: cmpw %dx, %ax
; X64-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: cmovel %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X86-LABEL: func:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %eax, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: movzwl %dx, %esi
; X86-NEXT: shrl %cl, %esi
@@ -51,14 +51,14 @@ define i16 @func2(i8 %x, i8 %y) nounwind {
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: addl %eax, %eax
; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movzwl %dx, %esi
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %esi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %esi
-; X64-NEXT: cmpw %si, %ax
-; X64-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X64-NEXT: cmovel %edx, %eax
-; X64-NEXT: cwtl
+; X64-NEXT: cmpw %si, %dx
+; X64-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; X64-NEXT: cmovel %eax, %ecx
+; X64-NEXT: movswl %cx, %eax
; X64-NEXT: shrl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/ushl_sat_vec.ll b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
index b8e83da..762088c 100644
--- a/llvm/test/CodeGen/X86/ushl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
@@ -300,95 +300,94 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: subl $12, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %ebp, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movzwl %bx, %edi
-; X86-NEXT: shrl %cl, %edi
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: cmovnel %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edi
-; X86-NEXT: shrl %cl, %edi
-; X86-NEXT: cmpw %di, %si
+; X86-NEXT: movzwl %ax, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: cmpw %si, %dx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $65535, %esi # imm = 0xFFFF
-; X86-NEXT: cmovnel %esi, %eax
+; X86-NEXT: movl $65535, %edx # imm = 0xFFFF
+; X86-NEXT: cmovnel %edx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebp, %eax
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: cmpw %dx, %bp
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovnel %esi, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %ebp
; X86-NEXT: shll %cl, %ebp
-; X86-NEXT: movzwl %bp, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: cmpw %dx, %si
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzwl %bp, %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: cmpw %ax, %di
+; X86-NEXT: movl %ebx, %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovnel %eax, %ebp
-; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: cmovnel %edx, %ebp
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movzwl %bx, %esi
-; X86-NEXT: shrl %cl, %esi
-; X86-NEXT: cmpw %si, %dx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzwl %bx, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $65535, %esi # imm = 0xFFFF
; X86-NEXT: cmovnel %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movzwl %di, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: cmovnel %esi, %edi
+; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %edx, %edi
+; X86-NEXT: shll %cl, %ebp
+; X86-NEXT: movzwl %bp, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: cmovnel %esi, %ebp
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %eax
; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movzwl %di, %eax
-; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: cmpw %ax, %dx
+; X86-NEXT: movzwl %di, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
; X86-NEXT: cmovnel %esi, %edi
+; X86-NEXT: movl $65535, %ebx # imm = 0xFFFF
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movzwl %si, %eax
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movzwl %dx, %eax
; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: cmpw %ax, %dx
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: cmovnel %eax, %esi
+; X86-NEXT: cmpw %ax, %si
+; X86-NEXT: cmovnel %ebx, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ebx
; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmpw %dx, %cx
+; X86-NEXT: movzwl %ax, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: cmpw %si, %bx
; X86-NEXT: movl $65535, %ecx # imm = 0xFFFF
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movw %ax, 14(%ecx)
-; X86-NEXT: movw %si, 12(%ecx)
+; X86-NEXT: movw %dx, 12(%ecx)
; X86-NEXT: movw %di, 10(%ecx)
-; X86-NEXT: movw %bx, 8(%ecx)
-; X86-NEXT: movw %bp, 6(%ecx)
+; X86-NEXT: movw %bp, 8(%ecx)
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NEXT: movw %ax, 6(%ecx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, 4(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, 2(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, (%ecx)
; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: addl $12, %esp
+; X86-NEXT: addl $16, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
index b233855..324fe12 100644
--- a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
+++ b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
@@ -85,14 +85,14 @@ define <4 x i16> @smulfixsat(<4 x i16> %a) {
; CHECK-NEXT: movswl %dx, %edx
; CHECK-NEXT: leal (,%rdx,4), %esi
; CHECK-NEXT: movl %esi, %edi
-; CHECK-NEXT: shrl $16, %edi
-; CHECK-NEXT: shldw $1, %si, %di
+; CHECK-NEXT: shrl $16, %esi
+; CHECK-NEXT: shldw $1, %di, %si
; CHECK-NEXT: sarl $14, %edx
; CHECK-NEXT: cmpl $16384, %edx # imm = 0x4000
-; CHECK-NEXT: cmovgel %eax, %edi
+; CHECK-NEXT: cmovgel %eax, %esi
; CHECK-NEXT: cmpl $-16384, %edx # imm = 0xC000
-; CHECK-NEXT: cmovll %ecx, %edi
-; CHECK-NEXT: pinsrw $3, %edi, %xmm1
+; CHECK-NEXT: cmovll %ecx, %esi
+; CHECK-NEXT: pinsrw $3, %esi, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%t = call <4 x i16> @llvm.smul.fix.sat.v4i16(<4 x i16> <i16 1, i16 2, i16 3, i16 4>, <4 x i16> %a, i32 15)
@@ -106,19 +106,19 @@ define <4 x i16> @umulfixsat(<4 x i16> %a) {
; CHECK-NEXT: pextrw $2, %xmm0, %eax
; CHECK-NEXT: leal (%rax,%rax,2), %eax
; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: shrl $16, %edx
-; CHECK-NEXT: movl %edx, %ecx
-; CHECK-NEXT: shldw $1, %ax, %cx
-; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
+; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shldw $1, %dx, %cx
+; CHECK-NEXT: cmpl $32768, %eax # imm = 0x8000
; CHECK-NEXT: movl $65535, %eax # imm = 0xFFFF
; CHECK-NEXT: cmovael %eax, %ecx
; CHECK-NEXT: pextrw $1, %xmm0, %edx
; CHECK-NEXT: addl %edx, %edx
; CHECK-NEXT: movl %edx, %esi
-; CHECK-NEXT: shrl $16, %esi
-; CHECK-NEXT: movl %esi, %edi
-; CHECK-NEXT: shldw $1, %dx, %di
-; CHECK-NEXT: cmpl $32768, %esi # imm = 0x8000
+; CHECK-NEXT: shrl $16, %edx
+; CHECK-NEXT: movl %edx, %edi
+; CHECK-NEXT: shldw $1, %si, %di
+; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
; CHECK-NEXT: cmovael %eax, %edi
; CHECK-NEXT: movd %xmm0, %edx
; CHECK-NEXT: xorl %esi, %esi
@@ -133,10 +133,10 @@ define <4 x i16> @umulfixsat(<4 x i16> %a) {
; CHECK-NEXT: pextrw $3, %xmm0, %ecx
; CHECK-NEXT: shll $2, %ecx
; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: shrl $16, %edx
-; CHECK-NEXT: movl %edx, %esi
-; CHECK-NEXT: shldw $1, %cx, %si
-; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
+; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: movl %ecx, %esi
+; CHECK-NEXT: shldw $1, %dx, %si
+; CHECK-NEXT: cmpl $32768, %ecx # imm = 0x8000
; CHECK-NEXT: cmovael %eax, %esi
; CHECK-NEXT: pinsrw $3, %esi, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
index 320dce8..6cb4323 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
@@ -397,8 +397,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -409,8 +409,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -421,8 +421,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512VL-NEXT: vpmovw2m %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -722,8 +722,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -734,8 +734,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -746,8 +746,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -974,13 +974,13 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -990,13 +990,13 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovb2m %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -1211,8 +1211,8 @@ define i1 @icmp0_v16i8_v16i1(<16 x i8>) nounwind {
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1222,8 +1222,8 @@ define i1 @icmp0_v16i8_v16i1(<16 x i8>) nounwind {
; AVX512VL-NEXT: vptestnmb %xmm0, %xmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: retq
%a = icmp eq <16 x i8> %0, zeroinitializer
@@ -1427,8 +1427,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -1439,8 +1439,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512BW-NEXT: vptestnmw %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1450,8 +1450,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512VL-NEXT: vptestnmw %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -1756,8 +1756,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -1767,8 +1767,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1778,8 +1778,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512VL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2010,13 +2010,13 @@ define i1 @icmp0_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2025,13 +2025,13 @@ define i1 @icmp0_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2240,8 +2240,8 @@ define i1 @icmp_v16i8_v16i1(<16 x i8>, <16 x i8>) nounwind {
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2251,8 +2251,8 @@ define i1 @icmp_v16i8_v16i1(<16 x i8>, <16 x i8>) nounwind {
; AVX512VL-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: retq
%a = icmp eq <16 x i8> %0, %1
@@ -2504,8 +2504,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -2517,8 +2517,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2528,8 +2528,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512VL-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2845,8 +2845,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512F-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -2856,8 +2856,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512BW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2867,8 +2867,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512VL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -3097,13 +3097,13 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>, <64 x i8>) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3112,13 +3112,13 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>, <64 x i8>) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
index 3c98eba6..1c3d27f 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -777,31 +777,31 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, (%esp)
+; FALLBACK18-NEXT: movl %eax, %ecx
; FALLBACK18-NEXT: andb $12, %bl
-; FALLBACK18-NEXT: movzbl %bl, %esi
-; FALLBACK18-NEXT: movl 4(%esp,%esi), %edi
-; FALLBACK18-NEXT: movl 8(%esp,%esi), %ebx
-; FALLBACK18-NEXT: shrxl %eax, %edi, %ebp
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ebx,%ebx), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: orl %ebp, %ecx
-; FALLBACK18-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK18-NEXT: orl %ebp, %edi
-; FALLBACK18-NEXT: shrxl %eax, %ebx, %ebx
-; FALLBACK18-NEXT: movl 12(%esp,%esi), %esi
-; FALLBACK18-NEXT: shrxl %eax, %esi, %eax
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %edx
-; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: movzbl %bl, %edi
+; FALLBACK18-NEXT: movl 4(%esp,%edi), %ebx
+; FALLBACK18-NEXT: movl 8(%esp,%edi), %esi
+; FALLBACK18-NEXT: shrxl %ecx, %ebx, %ebp
+; FALLBACK18-NEXT: notb %al
+; FALLBACK18-NEXT: leal (%esi,%esi), %edx
+; FALLBACK18-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK18-NEXT: orl %ebp, %edx
+; FALLBACK18-NEXT: shrxl %ecx, (%esp,%edi), %ebp
+; FALLBACK18-NEXT: addl %ebx, %ebx
+; FALLBACK18-NEXT: shlxl %eax, %ebx, %ebx
+; FALLBACK18-NEXT: orl %ebp, %ebx
+; FALLBACK18-NEXT: movl 12(%esp,%edi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK18-NEXT: shlxl %eax, %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ecx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK18-NEXT: movl %eax, 12(%esi)
-; FALLBACK18-NEXT: movl %edx, 8(%esi)
-; FALLBACK18-NEXT: movl %edi, (%esi)
-; FALLBACK18-NEXT: movl %ecx, 4(%esi)
+; FALLBACK18-NEXT: movl %ecx, 12(%esi)
+; FALLBACK18-NEXT: movl %eax, 8(%esi)
+; FALLBACK18-NEXT: movl %ebx, (%esi)
+; FALLBACK18-NEXT: movl %edx, 4(%esi)
; FALLBACK18-NEXT: addl $44, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -962,42 +962,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: pushl %ebx
; FALLBACK22-NEXT: pushl %edi
; FALLBACK22-NEXT: pushl %esi
-; FALLBACK22-NEXT: subl $44, %esp
+; FALLBACK22-NEXT: subl $60, %esp
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %eax
; FALLBACK22-NEXT: shlb $3, %al
; FALLBACK22-NEXT: xorps %xmm1, %xmm1
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movaps %xmm0, (%esp)
-; FALLBACK22-NEXT: andb $12, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %eax, %ecx
-; FALLBACK22-NEXT: notb %cl
-; FALLBACK22-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK22-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK22-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK22-NEXT: orl %ebx, %edx
-; FALLBACK22-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK22-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK22-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK22-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK22-NEXT: orl %ebx, %edi
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: andb $12, %dl
+; FALLBACK22-NEXT: movzbl %dl, %edi
+; FALLBACK22-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK22-NEXT: notb %al
+; FALLBACK22-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK22-NEXT: orl %ebp, %edx
+; FALLBACK22-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK22-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK22-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl %eax, 12(%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
+; FALLBACK22-NEXT: movl %ecx, 12(%esi)
+; FALLBACK22-NEXT: movl %eax, 4(%esi)
; FALLBACK22-NEXT: movl %edi, 8(%esi)
; FALLBACK22-NEXT: movl %edx, (%esi)
-; FALLBACK22-NEXT: addl $44, %esp
+; FALLBACK22-NEXT: addl $60, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
; FALLBACK22-NEXT: popl %ebx
@@ -1152,42 +1153,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: pushl %ebx
; FALLBACK26-NEXT: pushl %edi
; FALLBACK26-NEXT: pushl %esi
-; FALLBACK26-NEXT: subl $44, %esp
+; FALLBACK26-NEXT: subl $60, %esp
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: vmovaps %xmm0, (%esp)
-; FALLBACK26-NEXT: andb $12, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %eax, %ecx
-; FALLBACK26-NEXT: notb %cl
-; FALLBACK26-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK26-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK26-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK26-NEXT: orl %ebx, %edx
-; FALLBACK26-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK26-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK26-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK26-NEXT: orl %ebx, %edi
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: andb $12, %dl
+; FALLBACK26-NEXT: movzbl %dl, %edi
+; FALLBACK26-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK26-NEXT: orl %ebp, %edx
+; FALLBACK26-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK26-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK26-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK26-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK26-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl %eax, 12(%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
+; FALLBACK26-NEXT: movl %ecx, 12(%esi)
+; FALLBACK26-NEXT: movl %eax, 4(%esi)
; FALLBACK26-NEXT: movl %edi, 8(%esi)
; FALLBACK26-NEXT: movl %edx, (%esi)
-; FALLBACK26-NEXT: addl $44, %esp
+; FALLBACK26-NEXT: addl $60, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
; FALLBACK26-NEXT: popl %ebx
@@ -1342,42 +1344,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: pushl %ebx
; FALLBACK30-NEXT: pushl %edi
; FALLBACK30-NEXT: pushl %esi
-; FALLBACK30-NEXT: subl $44, %esp
+; FALLBACK30-NEXT: subl $60, %esp
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: vmovaps %xmm0, (%esp)
-; FALLBACK30-NEXT: andb $12, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %eax, %ecx
-; FALLBACK30-NEXT: notb %cl
-; FALLBACK30-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK30-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK30-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK30-NEXT: orl %ebx, %edx
-; FALLBACK30-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK30-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK30-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK30-NEXT: orl %ebx, %edi
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: andb $12, %dl
+; FALLBACK30-NEXT: movzbl %dl, %edi
+; FALLBACK30-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK30-NEXT: orl %ebp, %edx
+; FALLBACK30-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK30-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK30-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK30-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl %eax, 12(%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
+; FALLBACK30-NEXT: movl %ecx, 12(%esi)
+; FALLBACK30-NEXT: movl %eax, 4(%esi)
; FALLBACK30-NEXT: movl %edi, 8(%esi)
; FALLBACK30-NEXT: movl %edx, (%esi)
-; FALLBACK30-NEXT: addl $44, %esp
+; FALLBACK30-NEXT: addl $60, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
; FALLBACK30-NEXT: popl %ebx
@@ -1784,41 +1787,41 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl 4(%ecx), %esi
; FALLBACK18-NEXT: movl 8(%ecx), %edi
; FALLBACK18-NEXT: movl 12(%ecx), %ecx
-; FALLBACK18-NEXT: movzbl (%eax), %eax
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: shlb $3, %bl
+; FALLBACK18-NEXT: movzbl (%eax), %ebx
+; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: shlb $3, %al
; FALLBACK18-NEXT: xorps %xmm0, %xmm0
; FALLBACK18-NEXT: movaps %xmm0, (%esp)
; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: andb $12, %al
-; FALLBACK18-NEXT: negb %al
-; FALLBACK18-NEXT: movsbl %al, %edx
-; FALLBACK18-NEXT: movl 16(%esp,%edx), %edi
-; FALLBACK18-NEXT: movl 20(%esp,%edx), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %ebp
-; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: movl %eax, %ecx
+; FALLBACK18-NEXT: andb $12, %bl
+; FALLBACK18-NEXT: negb %bl
+; FALLBACK18-NEXT: movsbl %bl, %esi
+; FALLBACK18-NEXT: movl 16(%esp,%esi), %ebx
+; FALLBACK18-NEXT: movl 20(%esp,%esi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edi
; FALLBACK18-NEXT: notb %al
-; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %eax, %edi, %edi
-; FALLBACK18-NEXT: orl %esi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, 28(%esp,%edx), %esi
-; FALLBACK18-NEXT: movl 24(%esp,%edx), %edx
-; FALLBACK18-NEXT: shlxl %ebx, %edx, %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebp
+; FALLBACK18-NEXT: shrl %ebx
+; FALLBACK18-NEXT: shrxl %eax, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: shlxl %ecx, 28(%esp,%esi), %edi
+; FALLBACK18-NEXT: movl 24(%esp,%esi), %esi
+; FALLBACK18-NEXT: shlxl %ecx, %esi, %ecx
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK18-NEXT: orl %edi, %esi
; FALLBACK18-NEXT: shrl %edx
-; FALLBACK18-NEXT: shrxl %eax, %edx, %edx
-; FALLBACK18-NEXT: orl %esi, %edx
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %eax, %ecx, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK18-NEXT: movl %ebp, (%ecx)
; FALLBACK18-NEXT: movl %eax, 8(%ecx)
-; FALLBACK18-NEXT: movl %edx, 12(%ecx)
-; FALLBACK18-NEXT: movl %edi, 4(%ecx)
+; FALLBACK18-NEXT: movl %esi, 12(%ecx)
+; FALLBACK18-NEXT: movl %ebx, 4(%ecx)
; FALLBACK18-NEXT: addl $44, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -1983,39 +1986,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %eax
; FALLBACK22-NEXT: shlb $3, %al
; FALLBACK22-NEXT: xorps %xmm1, %xmm1
; FALLBACK22-NEXT: movaps %xmm1, (%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $12, %cl
-; FALLBACK22-NEXT: negb %cl
-; FALLBACK22-NEXT: movsbl %cl, %ecx
-; FALLBACK22-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK22-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK22-NEXT: movl %eax, %ebx
-; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %edx
-; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK22-NEXT: orl %esi, %edx
-; FALLBACK22-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl %esi, %ebp
+; FALLBACK22-NEXT: movl %eax, %ecx
+; FALLBACK22-NEXT: andb $12, %dl
+; FALLBACK22-NEXT: negb %dl
+; FALLBACK22-NEXT: movsbl %dl, %edx
+; FALLBACK22-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK22-NEXT: notb %al
+; FALLBACK22-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %ebp
; FALLBACK22-NEXT: shrl %ebp
-; FALLBACK22-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK22-NEXT: orl %edi, %ebp
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK22-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl %eax, (%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
-; FALLBACK22-NEXT: movl %ebp, 8(%esi)
-; FALLBACK22-NEXT: movl %edx, 12(%esi)
+; FALLBACK22-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK22-NEXT: orl %ebx, %ebp
+; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK22-NEXT: shrl %edx
+; FALLBACK22-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, (%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %ebp, 8(%edx)
+; FALLBACK22-NEXT: movl %esi, 12(%edx)
; FALLBACK22-NEXT: addl $44, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -2175,39 +2178,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovaps %xmm1, (%esp)
; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $12, %cl
-; FALLBACK26-NEXT: negb %cl
-; FALLBACK26-NEXT: movsbl %cl, %ecx
-; FALLBACK26-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK26-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK26-NEXT: movl %eax, %ebx
-; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %edx
-; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK26-NEXT: orl %esi, %edx
-; FALLBACK26-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl %esi, %ebp
+; FALLBACK26-NEXT: movl %eax, %ecx
+; FALLBACK26-NEXT: andb $12, %dl
+; FALLBACK26-NEXT: negb %dl
+; FALLBACK26-NEXT: movsbl %dl, %edx
+; FALLBACK26-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %ebp
; FALLBACK26-NEXT: shrl %ebp
-; FALLBACK26-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK26-NEXT: orl %edi, %ebp
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK26-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl %eax, (%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
-; FALLBACK26-NEXT: movl %ebp, 8(%esi)
-; FALLBACK26-NEXT: movl %edx, 12(%esi)
+; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK26-NEXT: orl %ebx, %ebp
+; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK26-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK26-NEXT: shrl %edx
+; FALLBACK26-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, (%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %ebp, 8(%edx)
+; FALLBACK26-NEXT: movl %esi, 12(%edx)
; FALLBACK26-NEXT: addl $44, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -2367,39 +2370,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovaps %xmm1, (%esp)
; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $12, %cl
-; FALLBACK30-NEXT: negb %cl
-; FALLBACK30-NEXT: movsbl %cl, %ecx
-; FALLBACK30-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK30-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK30-NEXT: movl %eax, %ebx
-; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %edx
-; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK30-NEXT: orl %esi, %edx
-; FALLBACK30-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl %esi, %ebp
+; FALLBACK30-NEXT: movl %eax, %ecx
+; FALLBACK30-NEXT: andb $12, %dl
+; FALLBACK30-NEXT: negb %dl
+; FALLBACK30-NEXT: movsbl %dl, %edx
+; FALLBACK30-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %ebp
; FALLBACK30-NEXT: shrl %ebp
-; FALLBACK30-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK30-NEXT: orl %edi, %ebp
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK30-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl %eax, (%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
-; FALLBACK30-NEXT: movl %ebp, 8(%esi)
-; FALLBACK30-NEXT: movl %edx, 12(%esi)
+; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK30-NEXT: orl %ebx, %ebp
+; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK30-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK30-NEXT: shrl %edx
+; FALLBACK30-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, (%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %ebp, 8(%edx)
+; FALLBACK30-NEXT: movl %esi, 12(%edx)
; FALLBACK30-NEXT: addl $44, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -2833,31 +2836,31 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, %ecx
; X86-NO-SHLD-HAVE-BMI2-NEXT: andb $12, %bl
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl %bl, %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%esi), %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%esi), %ebx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %edi, %ebp
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, %edx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: notb %dl
-; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%ebx,%ebx), %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %ecx, %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %edi, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %edi, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %ebx, %ebx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%esi), %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: sarxl %eax, %esi, %eax
-; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %esi, %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %esi, %edx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebx, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl %bl, %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%edi), %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%edi), %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: notb %al
+; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%esi,%esi), %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %edx, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, (%esp,%edi), %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %ebx, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %ebx, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%edi), %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%edi,%edi), %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %ebp, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, %esi, %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %esi, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: sarxl %ecx, %edi, %ecx
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, 12(%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, 8(%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edi, (%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, 4(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, 12(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, 8(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ebx, (%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, 4(%esi)
; X86-NO-SHLD-HAVE-BMI2-NEXT: addl $44, %esp
; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %esi
; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %edi
@@ -3208,30 +3211,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_32bytes:
@@ -3355,30 +3358,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %esi
; FALLBACK6-NEXT: andb $24, %cl
; FALLBACK6-NEXT: movzbl %cl, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: shrxq %rsi, -72(%rsp,%rcx), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: leaq (%rcx,%rcx), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_32bytes:
@@ -3487,35 +3490,35 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-LABEL: lshr_32bytes:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %eax
+; FALLBACK10-NEXT: leal (,%rax,8), %ecx
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: movzbl %cl, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movl %ecx, %esi
+; FALLBACK10-NEXT: andb $24, %al
+; FALLBACK10-NEXT: movzbl %al, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -72(%rsp,%rax), %rdi
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rax), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %rdi, %rcx
+; FALLBACK10-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 24(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -3623,35 +3626,35 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-LABEL: lshr_32bytes:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %eax
+; FALLBACK14-NEXT: leal (,%rax,8), %ecx
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: movzbl %cl, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movl %ecx, %esi
+; FALLBACK14-NEXT: andb $24, %al
+; FALLBACK14-NEXT: movzbl %al, %eax
+; FALLBACK14-NEXT: shrxq %rsi, -72(%rsp,%rax), %rdi
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rax), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %rdi, %rcx
+; FALLBACK14-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 24(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -3914,81 +3917,75 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ebx, %eax
-; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movl %ebx, %ecx
+; FALLBACK18-NEXT: shlb $3, %cl
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, %eax
; FALLBACK18-NEXT: andb $28, %bl
-; FALLBACK18-NEXT: movzbl %bl, %edi
-; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, %esi, %edx
+; FALLBACK18-NEXT: movzbl %bl, %esi
+; FALLBACK18-NEXT: movl 36(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl 40(%esp,%esi), %ebp
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%esi), %edi
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: orl %edi, %edx
; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %ebx, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, 32(%esp,%edi), %ebx
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%esp,%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal (%eax,%eax), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
-; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
-; FALLBACK18-NEXT: movl %ecx, %eax
-; FALLBACK18-NEXT: shrxl %ecx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl 48(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %ebx
+; FALLBACK18-NEXT: movl 44(%esp,%esi), %edx
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %ebp, %edi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
-; FALLBACK18-NEXT: orl %esi, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%esi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: movl 52(%esp,%esi), %eax
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebx
+; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
-; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebx
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK18-NEXT: orl %eax, %edi
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %ebx, 28(%eax)
-; FALLBACK18-NEXT: movl %edi, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 16(%eax)
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%esi), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %esi, %esi
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edi
+; FALLBACK18-NEXT: movl %esi, 28(%edi)
+; FALLBACK18-NEXT: movl %ecx, 24(%edi)
+; FALLBACK18-NEXT: movl %eax, 16(%edi)
+; FALLBACK18-NEXT: movl %edx, 20(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, (%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edi)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -4261,72 +4258,70 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %edx
-; FALLBACK22-NEXT: shlb $3, %dl
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: shlb $3, %cl
; FALLBACK22-NEXT: xorps %xmm2, %xmm2
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %edx, %eax
-; FALLBACK22-NEXT: notb %al
-; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK22-NEXT: movl %eax, %ebp
-; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK22-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK22-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: movzbl %dl, %ebx
+; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK22-NEXT: movl %eax, %edi
+; FALLBACK22-NEXT: notb %cl
+; FALLBACK22-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK22-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK22-NEXT: movl %ebp, %ecx
-; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK22-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK22-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK22-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK22-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK22-NEXT: movl %edi, %edx
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK22-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK22-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK22-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
; FALLBACK22-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK22-NEXT: orl %ebp, %ebx
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: movl %ecx, %edx
-; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK22-NEXT: orl %ebp, %edi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK22-NEXT: movl %eax, 28(%edx)
-; FALLBACK22-NEXT: movl %ecx, 4(%edx)
-; FALLBACK22-NEXT: movl %edi, 24(%edx)
-; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: orl %eax, %ebx
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, 28(%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %ebx, 24(%edx)
+; FALLBACK22-NEXT: movl %edi, 16(%edx)
+; FALLBACK22-NEXT: movl %ebp, 20(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK22-NEXT: movl %eax, 8(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -4585,70 +4580,68 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %edx
-; FALLBACK26-NEXT: shlb $3, %dl
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %ecx
+; FALLBACK26-NEXT: shlb $3, %cl
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %edx, %eax
-; FALLBACK26-NEXT: notb %al
-; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK26-NEXT: movl %eax, %ebp
-; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK26-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK26-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: movzbl %dl, %ebx
+; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK26-NEXT: movl %eax, %edi
+; FALLBACK26-NEXT: notb %cl
+; FALLBACK26-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK26-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK26-NEXT: movl %ebp, %ecx
-; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK26-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK26-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK26-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK26-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK26-NEXT: movl %edi, %edx
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK26-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK26-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK26-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
; FALLBACK26-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK26-NEXT: orl %ebp, %ebx
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: movl %ecx, %edx
-; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK26-NEXT: orl %ebp, %edi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK26-NEXT: movl %eax, 28(%edx)
-; FALLBACK26-NEXT: movl %ecx, 4(%edx)
-; FALLBACK26-NEXT: movl %edi, 24(%edx)
-; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: orl %eax, %ebx
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, 28(%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %ebx, 24(%edx)
+; FALLBACK26-NEXT: movl %edi, 16(%edx)
+; FALLBACK26-NEXT: movl %ebp, 20(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 8(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -4906,70 +4899,68 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %edx
-; FALLBACK30-NEXT: shlb $3, %dl
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %ecx
+; FALLBACK30-NEXT: shlb $3, %cl
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %edx, %eax
-; FALLBACK30-NEXT: notb %al
-; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK30-NEXT: movl %eax, %ebp
-; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK30-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK30-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: movzbl %dl, %ebx
+; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK30-NEXT: movl %eax, %edi
+; FALLBACK30-NEXT: notb %cl
+; FALLBACK30-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK30-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK30-NEXT: movl %ebp, %ecx
-; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK30-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK30-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK30-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK30-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK30-NEXT: movl %edi, %edx
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK30-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK30-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK30-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
; FALLBACK30-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK30-NEXT: orl %ebp, %ebx
-; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: movl %ecx, %edx
-; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK30-NEXT: orl %ebp, %edi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK30-NEXT: movl %eax, 28(%edx)
-; FALLBACK30-NEXT: movl %ecx, 4(%edx)
-; FALLBACK30-NEXT: movl %edi, 24(%edx)
-; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: orl %eax, %ebx
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, 28(%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %ebx, 24(%edx)
+; FALLBACK30-NEXT: movl %edi, 16(%edx)
+; FALLBACK30-NEXT: movl %ebp, 20(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 8(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5157,30 +5148,30 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $6, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi,4), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi,4), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_32bytes_dwordOff:
@@ -5307,30 +5298,30 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %esi
; FALLBACK6-NEXT: andb $6, %cl
; FALLBACK6-NEXT: movzbl %cl, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: shrxq %rsi, -72(%rsp,%rcx,4), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK6-NEXT: leaq (%rcx,%rcx), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_32bytes_dwordOff:
@@ -5441,36 +5432,36 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK10-LABEL: lshr_32bytes_dwordOff:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: movl %ecx, %eax
-; FALLBACK10-NEXT: shlb $5, %al
+; FALLBACK10-NEXT: movzbl (%rsi), %eax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: shlb $5, %cl
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $6, %cl
-; FALLBACK10-NEXT: movzbl %cl, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movl %ecx, %esi
+; FALLBACK10-NEXT: andb $6, %al
+; FALLBACK10-NEXT: movzbl %al, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -72(%rsp,%rax,4), %rdi
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rax,4), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %rdi, %rcx
+; FALLBACK10-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 24(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -5580,36 +5571,36 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK14-LABEL: lshr_32bytes_dwordOff:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: movl %ecx, %eax
-; FALLBACK14-NEXT: shlb $5, %al
+; FALLBACK14-NEXT: movzbl (%rsi), %eax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: shlb $5, %cl
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $6, %cl
-; FALLBACK14-NEXT: movzbl %cl, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movl %ecx, %esi
+; FALLBACK14-NEXT: andb $6, %al
+; FALLBACK14-NEXT: movzbl %al, %eax
+; FALLBACK14-NEXT: shrxq %rsi, -72(%rsp,%rax,4), %rdi
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rax,4), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %rdi, %rcx
+; FALLBACK14-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 24(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -6025,31 +6016,31 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
; FALLBACK2-NEXT: negb %sil
-; FALLBACK2-NEXT: movsbq %sil, %rsi
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
-; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movsbq %sil, %rdi
+; FALLBACK2-NEXT: movq -40(%rsp,%rdi), %r8
+; FALLBACK2-NEXT: movq -32(%rsp,%rdi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rdi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK2-NEXT: shrq %rdi
; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: shl_32bytes:
@@ -6167,38 +6158,38 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6: # %bb.0:
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
-; FALLBACK6-NEXT: movzbl (%rsi), %ecx
-; FALLBACK6-NEXT: leal (,%rcx,8), %eax
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: leal (,%rsi,8), %eax
; FALLBACK6-NEXT: xorps %xmm2, %xmm2
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: andb $24, %cl
-; FALLBACK6-NEXT: negb %cl
-; FALLBACK6-NEXT: movsbq %cl, %rcx
-; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: andb $24, %sil
+; FALLBACK6-NEXT: negb %sil
+; FALLBACK6-NEXT: movsbq %sil, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK6-NEXT: shrq %rsi
+; FALLBACK6-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK6-NEXT: orq %r9, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: shrq %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r8, %rcx
-; FALLBACK6-NEXT: shrq %r9
-; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rcx, (%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_32bytes:
@@ -6308,36 +6299,36 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-LABEL: shl_32bytes:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: negb %cl
-; FALLBACK10-NEXT: movsbq %cl, %rcx
-; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andb $24, %sil
+; FALLBACK10-NEXT: negb %sil
+; FALLBACK10-NEXT: movsbq %sil, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK10-NEXT: shrq %rsi
+; FALLBACK10-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK10-NEXT: orq %r9, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: shrq %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r8, %rcx
-; FALLBACK10-NEXT: shrq %r9
-; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rcx, (%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -6446,36 +6437,36 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-LABEL: shl_32bytes:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: negb %cl
-; FALLBACK14-NEXT: movsbq %cl, %rcx
-; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: andb $24, %sil
+; FALLBACK14-NEXT: negb %sil
+; FALLBACK14-NEXT: movsbq %sil, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK14-NEXT: shrq %rsi
+; FALLBACK14-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK14-NEXT: orq %r9, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: shrq %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r8, %rcx
-; FALLBACK14-NEXT: shrq %r9
-; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rcx, (%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -6745,71 +6736,75 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, %eax
+; FALLBACK18-NEXT: movl %eax, %ebp
; FALLBACK18-NEXT: andb $28, %bl
; FALLBACK18-NEXT: negb %bl
; FALLBACK18-NEXT: movsbl %bl, %esi
; FALLBACK18-NEXT: movl 64(%esp,%esi), %ebx
; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 68(%esp,%esi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %eax, %edi
-; FALLBACK18-NEXT: movl %edx, %ecx
-; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: movl 68(%esp,%esi), %ecx
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %ecx, %edi
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %ebx
; FALLBACK18-NEXT: orl %edi, %ebx
; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 72(%esp,%esi), %ebx
; FALLBACK18-NEXT: movl %ebx, %edi
; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %ecx, %edi, %eax
+; FALLBACK18-NEXT: shrxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 76(%esp,%esi), %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: movl %ebp, %esi
+; FALLBACK18-NEXT: shlxl %ebp, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ecx, %eax, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 80(%esp,%esi), %ebx
-; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %esi, %ebx, %ebx
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %edx, %ecx, %ecx
+; FALLBACK18-NEXT: orl %ebx, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK18-NEXT: movl 80(%esp,%ebp), %ecx
+; FALLBACK18-NEXT: movl %ecx, %ebx
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %eax
-; FALLBACK18-NEXT: movl 84(%esp,%esi), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebp
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %esi, %ebx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %esi, %ecx, %ecx
+; FALLBACK18-NEXT: movl %esi, %eax
; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %ecx, %edi, %edi
-; FALLBACK18-NEXT: orl %eax, %edi
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, 92(%esp,%esi), %ebp
-; FALLBACK18-NEXT: movl 88(%esp,%esi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: shrxl %edx, %edi, %edi
+; FALLBACK18-NEXT: orl %ecx, %edi
+; FALLBACK18-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: shlxl %esi, 92(%esp,%ecx), %ebp
+; FALLBACK18-NEXT: movl 88(%esp,%ecx), %esi
+; FALLBACK18-NEXT: shlxl %eax, %esi, %ecx
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
; FALLBACK18-NEXT: orl %ebp, %esi
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %edx
-; FALLBACK18-NEXT: orl %eax, %edx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl %edx, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 28(%eax)
-; FALLBACK18-NEXT: movl %edi, 16(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: movl %ecx, (%edx)
+; FALLBACK18-NEXT: movl %eax, 24(%edx)
+; FALLBACK18-NEXT: movl %esi, 28(%edx)
+; FALLBACK18-NEXT: movl %edi, 16(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 20(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edx)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -7085,78 +7080,76 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: shlb $3, %cl
; FALLBACK22-NEXT: xorps %xmm2, %xmm2
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: negb %cl
-; FALLBACK22-NEXT: movsbl %cl, %edx
-; FALLBACK22-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK22-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
-; FALLBACK22-NEXT: movl %eax, %ebx
-; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK22-NEXT: movl %ecx, %esi
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edi, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK22-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK22-NEXT: movl %esi, %edi
+; FALLBACK22-NEXT: movl %ecx, %ebx
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: negb %dl
+; FALLBACK22-NEXT: movsbl %dl, %edx
+; FALLBACK22-NEXT: movl 84(%esp,%edx), %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK22-NEXT: notb %cl
+; FALLBACK22-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: shrl %edi
-; FALLBACK22-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK22-NEXT: orl %ecx, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %esi, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK22-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK22-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK22-NEXT: movl %esi, %edi
; FALLBACK22-NEXT: shrl %edi
-; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK22-NEXT: orl %ecx, %ebp
-; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %ebp, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %ebp
+; FALLBACK22-NEXT: shrl %ebp
+; FALLBACK22-NEXT: shrxl %ecx, %ebp, %ebp
+; FALLBACK22-NEXT: orl %esi, %ebp
+; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK22-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK22-NEXT: movl %ebp, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK22-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK22-NEXT: movl %esi, %ecx
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK22-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK22-NEXT: shlxl %eax, %edx, %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: orl %ebp, %edi
; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %esi, %eax
-; FALLBACK22-NEXT: shrl %edx
-; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK22-NEXT: orl %edi, %edx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK22-NEXT: movl %edi, (%esi)
-; FALLBACK22-NEXT: movl %edx, 28(%esi)
-; FALLBACK22-NEXT: movl %eax, 24(%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
-; FALLBACK22-NEXT: movl %ebp, 8(%esi)
+; FALLBACK22-NEXT: shrxl %ecx, %eax, %esi
+; FALLBACK22-NEXT: movl 88(%esp,%edx), %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ecx, %eax, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movl %ebp, (%ecx)
+; FALLBACK22-NEXT: movl %eax, 28(%ecx)
+; FALLBACK22-NEXT: movl %esi, 24(%ecx)
+; FALLBACK22-NEXT: movl %edi, 4(%ecx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 12(%esi)
+; FALLBACK22-NEXT: movl %eax, 12(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 16(%esi)
+; FALLBACK22-NEXT: movl %eax, 16(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%esi)
+; FALLBACK22-NEXT: movl %eax, 20(%ecx)
; FALLBACK22-NEXT: addl $108, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -7410,76 +7403,74 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: negb %cl
-; FALLBACK26-NEXT: movsbl %cl, %edx
-; FALLBACK26-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK26-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
; FALLBACK26-NEXT: movl %eax, %ebx
-; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK26-NEXT: movl %ecx, %esi
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %edi, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK26-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK26-NEXT: movl %esi, %edi
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: negb %dl
+; FALLBACK26-NEXT: movsbl %dl, %edx
+; FALLBACK26-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %esi
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: shrl %edi
-; FALLBACK26-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK26-NEXT: orl %ecx, %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %esi, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK26-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK26-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK26-NEXT: movl %esi, %edi
; FALLBACK26-NEXT: shrl %edi
-; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK26-NEXT: orl %ecx, %ebp
-; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %ebp, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %ebp
+; FALLBACK26-NEXT: shrl %ebp
+; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK26-NEXT: orl %esi, %ebp
+; FALLBACK26-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK26-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK26-NEXT: movl %ebp, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK26-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK26-NEXT: movl %esi, %ecx
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %edi, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK26-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK26-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %eax, %esi, %edi
+; FALLBACK26-NEXT: orl %ebp, %edi
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %esi
+; FALLBACK26-NEXT: movl 88(%esp,%edx), %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %esi
+; FALLBACK26-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: movl %ebp, (%ecx)
+; FALLBACK26-NEXT: movl %eax, 28(%ecx)
+; FALLBACK26-NEXT: movl %esi, 24(%ecx)
+; FALLBACK26-NEXT: movl %edi, 4(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %esi, %eax
-; FALLBACK26-NEXT: shrl %edx
-; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK26-NEXT: orl %edi, %edx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK26-NEXT: movl %edi, (%esi)
-; FALLBACK26-NEXT: movl %edx, 28(%esi)
-; FALLBACK26-NEXT: movl %eax, 24(%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
-; FALLBACK26-NEXT: movl %ebp, 8(%esi)
+; FALLBACK26-NEXT: movl %eax, 8(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 12(%esi)
+; FALLBACK26-NEXT: movl %eax, 12(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 16(%esi)
+; FALLBACK26-NEXT: movl %eax, 16(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%esi)
+; FALLBACK26-NEXT: movl %eax, 20(%ecx)
; FALLBACK26-NEXT: addl $108, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -7732,76 +7723,74 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: negb %cl
-; FALLBACK30-NEXT: movsbl %cl, %edx
-; FALLBACK30-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
; FALLBACK30-NEXT: movl %eax, %ebx
-; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK30-NEXT: movl %ecx, %esi
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %edi, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK30-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl %esi, %edi
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: negb %dl
+; FALLBACK30-NEXT: movsbl %dl, %edx
+; FALLBACK30-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %esi
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: shrl %edi
-; FALLBACK30-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK30-NEXT: orl %ecx, %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %esi, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK30-NEXT: movl %esi, %edi
; FALLBACK30-NEXT: shrl %edi
-; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK30-NEXT: orl %ecx, %ebp
-; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %ebp, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %ebp
+; FALLBACK30-NEXT: shrl %ebp
+; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK30-NEXT: orl %esi, %ebp
+; FALLBACK30-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK30-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK30-NEXT: movl %ebp, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK30-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl %esi, %ecx
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %edi, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK30-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK30-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %eax, %esi, %edi
+; FALLBACK30-NEXT: orl %ebp, %edi
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %esi
+; FALLBACK30-NEXT: movl 88(%esp,%edx), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %esi
+; FALLBACK30-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: movl %ebp, (%ecx)
+; FALLBACK30-NEXT: movl %eax, 28(%ecx)
+; FALLBACK30-NEXT: movl %esi, 24(%ecx)
+; FALLBACK30-NEXT: movl %edi, 4(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %esi, %eax
-; FALLBACK30-NEXT: shrl %edx
-; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK30-NEXT: orl %edi, %edx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK30-NEXT: movl %edi, (%esi)
-; FALLBACK30-NEXT: movl %edx, 28(%esi)
-; FALLBACK30-NEXT: movl %eax, 24(%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
-; FALLBACK30-NEXT: movl %ebp, 8(%esi)
+; FALLBACK30-NEXT: movl %eax, 8(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 12(%esi)
+; FALLBACK30-NEXT: movl %eax, 12(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 16(%esi)
+; FALLBACK30-NEXT: movl %eax, 16(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%esi)
+; FALLBACK30-NEXT: movl %eax, 20(%ecx)
; FALLBACK30-NEXT: addl $108, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -7987,32 +7976,32 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: shlb $2, %sil
; FALLBACK2-NEXT: andb $24, %sil
; FALLBACK2-NEXT: negb %sil
-; FALLBACK2-NEXT: movsbq %sil, %rsi
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
-; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movsbq %sil, %rdi
+; FALLBACK2-NEXT: movq -40(%rsp,%rdi), %r8
+; FALLBACK2-NEXT: movq -32(%rsp,%rdi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rdi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK2-NEXT: shrq %rdi
; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: shl_32bytes_dwordOff:
@@ -8135,40 +8124,40 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK6: # %bb.0:
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
-; FALLBACK6-NEXT: movzbl (%rsi), %ecx
-; FALLBACK6-NEXT: movl %ecx, %eax
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: movl %esi, %eax
; FALLBACK6-NEXT: shlb $5, %al
; FALLBACK6-NEXT: xorps %xmm2, %xmm2
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: shlb $2, %cl
-; FALLBACK6-NEXT: andb $24, %cl
-; FALLBACK6-NEXT: negb %cl
-; FALLBACK6-NEXT: movsbq %cl, %rcx
-; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: shlb $2, %sil
+; FALLBACK6-NEXT: andb $24, %sil
+; FALLBACK6-NEXT: negb %sil
+; FALLBACK6-NEXT: movsbq %sil, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK6-NEXT: shrq %rsi
+; FALLBACK6-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK6-NEXT: orq %r9, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: shrq %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r8, %rcx
-; FALLBACK6-NEXT: shrq %r9
-; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rcx, (%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_32bytes_dwordOff:
@@ -8283,38 +8272,38 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK10-LABEL: shl_32bytes_dwordOff:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: movl %ecx, %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: movl %esi, %eax
; FALLBACK10-NEXT: shlb $5, %al
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: shlb $2, %cl
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: negb %cl
-; FALLBACK10-NEXT: movsbq %cl, %rcx
-; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: shlb $2, %sil
+; FALLBACK10-NEXT: andb $24, %sil
+; FALLBACK10-NEXT: negb %sil
+; FALLBACK10-NEXT: movsbq %sil, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK10-NEXT: shrq %rsi
+; FALLBACK10-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK10-NEXT: orq %r9, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: shrq %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r8, %rcx
-; FALLBACK10-NEXT: shrq %r9
-; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rcx, (%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -8428,38 +8417,38 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK14-LABEL: shl_32bytes_dwordOff:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: movl %ecx, %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: movl %esi, %eax
; FALLBACK14-NEXT: shlb $5, %al
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: shlb $2, %cl
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: negb %cl
-; FALLBACK14-NEXT: movsbq %cl, %rcx
-; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: shlb $2, %sil
+; FALLBACK14-NEXT: andb $24, %sil
+; FALLBACK14-NEXT: negb %sil
+; FALLBACK14-NEXT: movsbq %sil, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK14-NEXT: shrq %rsi
+; FALLBACK14-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK14-NEXT: orq %r9, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: shrq %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r8, %rcx
-; FALLBACK14-NEXT: shrq %r9
-; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rcx, (%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -8906,30 +8895,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_32bytes:
@@ -9067,30 +9056,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %ecx
; FALLBACK6-NEXT: andb $24, %sil
-; FALLBACK6-NEXT: movzbl %sil, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movzbl %sil, %esi
+; FALLBACK6-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_32bytes:
@@ -9227,30 +9216,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movl %eax, %ecx
; FALLBACK10-NEXT: andb $24, %sil
-; FALLBACK10-NEXT: movzbl %sil, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movzbl %sil, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %rdi, %rax
+; FALLBACK10-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 24(%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: retq
;
; FALLBACK11-LABEL: ashr_32bytes:
@@ -9387,30 +9376,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andb $24, %sil
-; FALLBACK14-NEXT: movzbl %sil, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movzbl %sil, %esi
+; FALLBACK14-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %rdi, %rax
+; FALLBACK14-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: movq %rcx, 24(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: retq
;
; FALLBACK15-LABEL: ashr_32bytes:
@@ -9671,7 +9660,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: pushl %edi
; FALLBACK18-NEXT: pushl %esi
; FALLBACK18-NEXT: subl $108, %esp
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
; FALLBACK18-NEXT: movl (%esi), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -9680,22 +9669,22 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl 8(%esi), %ebx
; FALLBACK18-NEXT: movl 12(%esi), %ebp
; FALLBACK18-NEXT: movl 16(%esi), %edi
-; FALLBACK18-NEXT: movzbl (%ecx), %ecx
-; FALLBACK18-NEXT: movl 20(%esi), %edx
+; FALLBACK18-NEXT: movzbl (%edx), %edx
+; FALLBACK18-NEXT: movl 20(%esi), %ecx
; FALLBACK18-NEXT: movl 24(%esi), %eax
; FALLBACK18-NEXT: movl 28(%esi), %esi
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ecx, %eax
-; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movl %edx, %ecx
+; FALLBACK18-NEXT: shlb $3, %cl
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: sarl $31, %esi
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
@@ -9705,66 +9694,65 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: andb $28, %cl
-; FALLBACK18-NEXT: movzbl %cl, %edi
-; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK18-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %ebp
-; FALLBACK18-NEXT: orl %ebx, %ebp
-; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%edi), %ebx
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
-; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
-; FALLBACK18-NEXT: shrxl %eax, %ebp, %ebx
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, %ecx, %ecx
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %ecx, %eax
+; FALLBACK18-NEXT: andb $28, %dl
+; FALLBACK18-NEXT: movzbl %dl, %esi
+; FALLBACK18-NEXT: movl 36(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl 40(%esp,%esi), %ebp
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%esi), %edi
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 48(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %ebx
+; FALLBACK18-NEXT: movl 44(%esp,%esi), %edx
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %ebp, %edi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
-; FALLBACK18-NEXT: orl %esi, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%esi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: movl 52(%esp,%esi), %eax
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebx
+; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
-; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK18-NEXT: sarxl %ebx, %edi, %ebx
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edx
-; FALLBACK18-NEXT: orl %eax, %edx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %ebx, 28(%eax)
-; FALLBACK18-NEXT: movl %edx, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 16(%eax)
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%esi), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: sarxl %ebp, %esi, %esi
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edi
+; FALLBACK18-NEXT: movl %esi, 28(%edi)
+; FALLBACK18-NEXT: movl %ecx, 24(%edi)
+; FALLBACK18-NEXT: movl %eax, 16(%edi)
+; FALLBACK18-NEXT: movl %edx, 20(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, (%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edi)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -10070,82 +10058,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movl 16(%ecx), %esi
; FALLBACK22-NEXT: movl 20(%ecx), %edi
-; FALLBACK22-NEXT: movl 24(%ecx), %ebx
-; FALLBACK22-NEXT: movl 28(%ecx), %edx
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shlb $3, %al
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl 24(%ecx), %ebp
+; FALLBACK22-NEXT: movl 28(%ecx), %ecx
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: shlb $3, %bl
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: sarl $31, %edx
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %eax, %edx
-; FALLBACK22-NEXT: notb %dl
-; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK22-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK22-NEXT: movl %eax, %ecx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK22-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK22-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: sarl $31, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ebx, %eax
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: movzbl %dl, %ecx
+; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK22-NEXT: movl %eax, %ebp
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK22-NEXT: addl %ebx, %ebx
-; FALLBACK22-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK22-NEXT: orl %ebp, %ebx
-; FALLBACK22-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK22-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK22-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK22-NEXT: orl %ecx, %edi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK22-NEXT: movl %eax, 28(%edx)
-; FALLBACK22-NEXT: movl %ecx, 4(%edx)
-; FALLBACK22-NEXT: movl %edi, 24(%edx)
-; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edx
+; FALLBACK22-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK22-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK22-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK22-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK22-NEXT: movl %ebp, %edx
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK22-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK22-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %eax, %esi
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, 28(%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %esi, 24(%edx)
+; FALLBACK22-NEXT: movl %edi, 16(%edx)
+; FALLBACK22-NEXT: movl %ebp, 20(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK22-NEXT: movl %eax, 8(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -10446,82 +10434,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
; FALLBACK26-NEXT: movl 16(%ecx), %esi
; FALLBACK26-NEXT: movl 20(%ecx), %edi
-; FALLBACK26-NEXT: movl 24(%ecx), %ebx
-; FALLBACK26-NEXT: movl 28(%ecx), %edx
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
-; FALLBACK26-NEXT: shlb $3, %al
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl 24(%ecx), %ebp
+; FALLBACK26-NEXT: movl 28(%ecx), %ecx
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: shlb $3, %bl
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: sarl $31, %edx
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %eax, %edx
-; FALLBACK26-NEXT: notb %dl
-; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK26-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK26-NEXT: movl %eax, %ecx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK26-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK26-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: sarl $31, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ebx, %eax
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: movzbl %dl, %ecx
+; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK26-NEXT: movl %eax, %ebp
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %ecx, %eax
-; FALLBACK26-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK26-NEXT: addl %ebx, %ebx
-; FALLBACK26-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK26-NEXT: orl %ebp, %ebx
-; FALLBACK26-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK26-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK26-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK26-NEXT: orl %ecx, %edi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK26-NEXT: movl %eax, 28(%edx)
-; FALLBACK26-NEXT: movl %ecx, 4(%edx)
-; FALLBACK26-NEXT: movl %edi, 24(%edx)
-; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edx
+; FALLBACK26-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK26-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK26-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK26-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK26-NEXT: movl %ebp, %edx
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK26-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %eax, %esi
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, 28(%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %esi, 24(%edx)
+; FALLBACK26-NEXT: movl %edi, 16(%edx)
+; FALLBACK26-NEXT: movl %ebp, 20(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 8(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -10822,82 +10810,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
; FALLBACK30-NEXT: movl 16(%ecx), %esi
; FALLBACK30-NEXT: movl 20(%ecx), %edi
-; FALLBACK30-NEXT: movl 24(%ecx), %ebx
-; FALLBACK30-NEXT: movl 28(%ecx), %edx
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
-; FALLBACK30-NEXT: shlb $3, %al
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl 24(%ecx), %ebp
+; FALLBACK30-NEXT: movl 28(%ecx), %ecx
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: shlb $3, %bl
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: sarl $31, %edx
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %eax, %edx
-; FALLBACK30-NEXT: notb %dl
-; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK30-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK30-NEXT: movl %eax, %ecx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK30-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK30-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: sarl $31, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ebx, %eax
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: movzbl %dl, %ecx
+; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK30-NEXT: movl %eax, %ebp
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %ecx, %eax
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK30-NEXT: addl %ebx, %ebx
-; FALLBACK30-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK30-NEXT: orl %ebp, %ebx
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK30-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK30-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK30-NEXT: orl %ecx, %edi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK30-NEXT: movl %eax, 28(%edx)
-; FALLBACK30-NEXT: movl %ecx, 4(%edx)
-; FALLBACK30-NEXT: movl %edi, 24(%edx)
-; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edx
+; FALLBACK30-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK30-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK30-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK30-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK30-NEXT: movl %ebp, %edx
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK30-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK30-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %eax, %esi
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, 28(%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %esi, 24(%edx)
+; FALLBACK30-NEXT: movl %edi, 16(%edx)
+; FALLBACK30-NEXT: movl %ebp, 20(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 8(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -11104,30 +11092,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $6, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi,4), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi,4), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_32bytes_dwordOff:
@@ -11268,30 +11256,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %ecx
; FALLBACK6-NEXT: andb $6, %sil
-; FALLBACK6-NEXT: movzbl %sil, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movzbl %sil, %esi
+; FALLBACK6-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK6-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_32bytes_dwordOff:
@@ -11431,30 +11419,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movl %eax, %ecx
; FALLBACK10-NEXT: andb $6, %sil
-; FALLBACK10-NEXT: movzbl %sil, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movzbl %sil, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %rdi, %rax
+; FALLBACK10-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 24(%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: retq
;
; FALLBACK11-LABEL: ashr_32bytes_dwordOff:
@@ -11594,30 +11582,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andb $6, %sil
-; FALLBACK14-NEXT: movzbl %sil, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movzbl %sil, %esi
+; FALLBACK14-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %rdi, %rax
+; FALLBACK14-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: movq %rcx, 24(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: retq
;
; FALLBACK15-LABEL: ashr_32bytes_dwordOff:
@@ -12204,10 +12192,8 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: lshr_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -12235,60 +12221,58 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rax,8), %ecx
; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: movl %ecx, %esi
; FALLBACK2-NEXT: andl $56, %eax
-; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
-; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
-; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
-; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
-; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
-; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
-; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
-; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
-; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
-; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
-; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
-; FALLBACK2-NEXT: movl %ecx, %r12d
-; FALLBACK2-NEXT: notb %r12b
-; FALLBACK2-NEXT: addq %r9, %r9
-; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %r8
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r8, %r9
+; FALLBACK2-NEXT: notb %cl
+; FALLBACK2-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rsi, -128(%rsp,%rax), %r9
+; FALLBACK2-NEXT: addq %r8, %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK2-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK2-NEXT: leaq (%r14,%r14), %r9
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r9
; FALLBACK2-NEXT: orq %rbx, %r9
-; FALLBACK2-NEXT: addq %rdi, %rdi
-; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r13, %rdi
-; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
-; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
-; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK2-NEXT: shrxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK2-NEXT: addq %r11, %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK2-NEXT: orq %r10, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %rbx
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %r15
+; FALLBACK2-NEXT: leaq (%r15,%r15), %r12
+; FALLBACK2-NEXT: shlxq %rcx, %r12, %r12
+; FALLBACK2-NEXT: orq %rbx, %r12
+; FALLBACK2-NEXT: shrxq %rsi, %r14, %rbx
; FALLBACK2-NEXT: addq %r10, %r10
-; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
-; FALLBACK2-NEXT: orq %r8, %r10
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
-; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
-; FALLBACK2-NEXT: orq %r15, %r8
-; FALLBACK2-NEXT: addq %r14, %r14
-; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
-; FALLBACK2-NEXT: orq %rbp, %r11
-; FALLBACK2-NEXT: addq %rax, %rax
-; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
-; FALLBACK2-NEXT: orq %r13, %rax
-; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
-; FALLBACK2-NEXT: movq %rax, 48(%rdx)
-; FALLBACK2-NEXT: movq %r11, 32(%rdx)
-; FALLBACK2-NEXT: movq %r8, 40(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
-; FALLBACK2-NEXT: movq %r10, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, (%rdx)
-; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK2-NEXT: orq %rbx, %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r15, %rbx
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: leaq (%rax,%rax), %r14
+; FALLBACK2-NEXT: shlxq %rcx, %r14, %rcx
+; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK2-NEXT: movq %rax, 56(%rdx)
+; FALLBACK2-NEXT: movq %rcx, 48(%rdx)
+; FALLBACK2-NEXT: movq %r10, 32(%rdx)
+; FALLBACK2-NEXT: movq %r12, 40(%rdx)
+; FALLBACK2-NEXT: movq %r11, 16(%rdx)
+; FALLBACK2-NEXT: movq %r9, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_64bytes:
@@ -12512,13 +12496,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: lshr_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
@@ -12533,62 +12515,60 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %esi
-; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: leal (,%rax,8), %ecx
+; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: movl %ecx, %esi
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK6-NEXT: movl %esi, %ebx
-; FALLBACK6-NEXT: notb %bl
-; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK6-NEXT: orq %r11, %r8
-; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK6-NEXT: notb %cl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK6-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r8, %rdi
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK6-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK6-NEXT: orq %rbx, %r8
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK6-NEXT: addq %r11, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK6-NEXT: orq %rbx, %r11
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK6-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK6-NEXT: orq %r15, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK6-NEXT: addq %rbx, %rbx
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r14, %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK6-NEXT: shrxq %rsi, %rax, %rsi
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK6-NEXT: orq %r9, %rdi
-; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK6-NEXT: orq %r14, %r9
-; FALLBACK6-NEXT: addq %r10, %r10
-; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK6-NEXT: orq %r15, %r10
-; FALLBACK6-NEXT: addq %rax, %rax
-; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK6-NEXT: orq %r13, %rax
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK6-NEXT: orq %rbp, %rcx
-; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK6-NEXT: orq %r14, %r15
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK6-NEXT: orq %r10, %rcx
+; FALLBACK6-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %r10, 32(%rdx)
-; FALLBACK6-NEXT: movq %r9, 40(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r8, (%rdx)
-; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: movq %r15, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK6-NEXT: movq %r13, 40(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_64bytes:
@@ -12749,43 +12729,43 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK9-NEXT: pushq %rbx
; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
; FALLBACK9-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK9-NEXT: movl (%rsi), %eax
+; FALLBACK9-NEXT: movl (%rsi), %edi
; FALLBACK9-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: leal (,%rdi,8), %ecx
; FALLBACK9-NEXT: andl $56, %ecx
-; FALLBACK9-NEXT: andl $56, %eax
-; FALLBACK9-NEXT: movq -96(%rsp,%rax), %rdi
-; FALLBACK9-NEXT: movq -104(%rsp,%rax), %r9
-; FALLBACK9-NEXT: movq %r9, %rsi
-; FALLBACK9-NEXT: shrdq %cl, %rdi, %rsi
-; FALLBACK9-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK9-NEXT: andl $56, %edi
+; FALLBACK9-NEXT: movq -96(%rsp,%rdi), %rsi
+; FALLBACK9-NEXT: movq -104(%rsp,%rdi), %r9
+; FALLBACK9-NEXT: movq %r9, %rax
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %rax
+; FALLBACK9-NEXT: movq -112(%rsp,%rdi), %r10
; FALLBACK9-NEXT: movq %r10, %r8
; FALLBACK9-NEXT: shrdq %cl, %r9, %r8
-; FALLBACK9-NEXT: movq -80(%rsp,%rax), %r9
-; FALLBACK9-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK9-NEXT: movq -80(%rsp,%rdi), %r9
+; FALLBACK9-NEXT: movq -88(%rsp,%rdi), %r11
; FALLBACK9-NEXT: movq %r11, %rbx
; FALLBACK9-NEXT: shrdq %cl, %r9, %rbx
-; FALLBACK9-NEXT: shrdq %cl, %r11, %rdi
-; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK9-NEXT: shrdq %cl, %r11, %rsi
+; FALLBACK9-NEXT: movq -72(%rsp,%rdi), %r11
; FALLBACK9-NEXT: shrdq %cl, %r11, %r9
-; FALLBACK9-NEXT: movq -128(%rsp,%rax), %r14
-; FALLBACK9-NEXT: movq -120(%rsp,%rax), %rax
-; FALLBACK9-NEXT: movq %rax, %r15
+; FALLBACK9-NEXT: movq -128(%rsp,%rdi), %r14
+; FALLBACK9-NEXT: movq -120(%rsp,%rdi), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r15
; FALLBACK9-NEXT: shrdq %cl, %r10, %r15
-; FALLBACK9-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r14
; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
; FALLBACK9-NEXT: shrq %cl, %r11
; FALLBACK9-NEXT: movq %r15, 8(%rdx)
; FALLBACK9-NEXT: movq %r9, 48(%rdx)
; FALLBACK9-NEXT: movq %r11, 56(%rdx)
-; FALLBACK9-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 32(%rdx)
; FALLBACK9-NEXT: movq %rbx, 40(%rdx)
; FALLBACK9-NEXT: movq %r8, 16(%rdx)
-; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %rax, 24(%rdx)
; FALLBACK9-NEXT: movq %r14, (%rdx)
; FALLBACK9-NEXT: popq %rbx
; FALLBACK9-NEXT: popq %r14
@@ -12795,77 +12775,73 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: lshr_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: movl (%rsi), %esi
; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %esi
-; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK10-NEXT: movl %esi, %ebx
-; FALLBACK10-NEXT: notb %bl
-; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK10-NEXT: orq %r11, %r8
-; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK10-NEXT: orq %r12, %r11
-; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
-; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK10-NEXT: shrxq %rsi, %rax, %rsi
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK10-NEXT: orq %r9, %rdi
-; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK10-NEXT: orq %r14, %r9
-; FALLBACK10-NEXT: addq %r10, %r10
-; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK10-NEXT: orq %r15, %r10
-; FALLBACK10-NEXT: addq %rax, %rax
-; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK10-NEXT: orq %r13, %rax
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK10-NEXT: orq %rbp, %rcx
-; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %r10, 32(%rdx)
-; FALLBACK10-NEXT: movq %r9, 40(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r8, (%rdx)
-; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -120(%rsp,%rsi), %r10
+; FALLBACK10-NEXT: movq -112(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r8, %rdi
+; FALLBACK10-NEXT: movq -104(%rsp,%rsi), %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r11, %rbx
+; FALLBACK10-NEXT: movq -96(%rsp,%rsi), %r14
+; FALLBACK10-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rbx, %r8
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rbx
+; FALLBACK10-NEXT: addq %r11, %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rbx, %r11
+; FALLBACK10-NEXT: movq -88(%rsp,%rsi), %rbx
+; FALLBACK10-NEXT: shrxq %rcx, %rbx, %r15
+; FALLBACK10-NEXT: movq -80(%rsp,%rsi), %r12
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK10-NEXT: shlxq %rax, %r13, %r13
+; FALLBACK10-NEXT: orq %r15, %r13
+; FALLBACK10-NEXT: shrxq %rcx, %r14, %r14
+; FALLBACK10-NEXT: addq %rbx, %rbx
+; FALLBACK10-NEXT: shlxq %rax, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r14, %rbx
+; FALLBACK10-NEXT: shrxq %rcx, %r12, %r14
+; FALLBACK10-NEXT: movq -72(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r15
+; FALLBACK10-NEXT: shlxq %rax, %r15, %r15
+; FALLBACK10-NEXT: orq %r14, %r15
+; FALLBACK10-NEXT: shrxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 56(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %r15, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK10-NEXT: movq %r13, 40(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -12930,45 +12906,45 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: pushq %rbx
; FALLBACK12-NEXT: pushq %rax
; FALLBACK12-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK12-NEXT: movl (%rsi), %r9d
+; FALLBACK12-NEXT: movl (%rsi), %r10d
; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK12-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK12-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK12-NEXT: leal (,%r9,8), %eax
+; FALLBACK12-NEXT: leal (,%r10,8), %eax
; FALLBACK12-NEXT: andl $56, %eax
-; FALLBACK12-NEXT: andl $56, %r9d
-; FALLBACK12-NEXT: movq -128(%rsp,%r9), %r10
-; FALLBACK12-NEXT: movq -120(%rsp,%r9), %r8
+; FALLBACK12-NEXT: andl $56, %r10d
+; FALLBACK12-NEXT: movq -128(%rsp,%r10), %r9
+; FALLBACK12-NEXT: movq -120(%rsp,%r10), %r8
; FALLBACK12-NEXT: movl %eax, %ecx
-; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: shrq %cl, %r9
; FALLBACK12-NEXT: movl %eax, %esi
; FALLBACK12-NEXT: notb %sil
; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %rdi
-; FALLBACK12-NEXT: orq %r10, %rdi
-; FALLBACK12-NEXT: movq -104(%rsp,%r9), %r10
-; FALLBACK12-NEXT: movq %r10, %rbx
+; FALLBACK12-NEXT: orq %r9, %rdi
+; FALLBACK12-NEXT: movq -104(%rsp,%r10), %r9
+; FALLBACK12-NEXT: movq %r9, %rbx
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %rbx
-; FALLBACK12-NEXT: movq -96(%rsp,%r9), %r12
+; FALLBACK12-NEXT: movq -96(%rsp,%r10), %r12
; FALLBACK12-NEXT: leaq (%r12,%r12), %r11
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r11
; FALLBACK12-NEXT: orq %rbx, %r11
-; FALLBACK12-NEXT: movq -112(%rsp,%r9), %rbx
+; FALLBACK12-NEXT: movq -112(%rsp,%r10), %rbx
; FALLBACK12-NEXT: movq %rbx, %r14
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %r14
-; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: addq %r9, %r9
; FALLBACK12-NEXT: movl %esi, %ecx
-; FALLBACK12-NEXT: shlq %cl, %r10
-; FALLBACK12-NEXT: orq %r14, %r10
-; FALLBACK12-NEXT: movq -88(%rsp,%r9), %r14
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: orq %r14, %r9
+; FALLBACK12-NEXT: movq -88(%rsp,%r10), %r14
; FALLBACK12-NEXT: movq %r14, %r13
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %r13
-; FALLBACK12-NEXT: movq -80(%rsp,%r9), %rbp
+; FALLBACK12-NEXT: movq -80(%rsp,%r10), %rbp
; FALLBACK12-NEXT: leaq (%rbp,%rbp), %r15
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r15
@@ -12981,8 +12957,8 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: orq %r12, %r14
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %rbp
-; FALLBACK12-NEXT: movq -72(%rsp,%r9), %r9
-; FALLBACK12-NEXT: leaq (%r9,%r9), %r12
+; FALLBACK12-NEXT: movq -72(%rsp,%r10), %r10
+; FALLBACK12-NEXT: leaq (%r10,%r10), %r12
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r12
; FALLBACK12-NEXT: orq %rbp, %r12
@@ -12993,13 +12969,13 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: shlq %cl, %rbx
; FALLBACK12-NEXT: orq %r8, %rbx
; FALLBACK12-NEXT: movl %eax, %ecx
-; FALLBACK12-NEXT: shrq %cl, %r9
-; FALLBACK12-NEXT: movq %r9, 56(%rdx)
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movq %r10, 56(%rdx)
; FALLBACK12-NEXT: movq %rbx, 8(%rdx)
; FALLBACK12-NEXT: movq %r12, 48(%rdx)
; FALLBACK12-NEXT: movq %r14, 32(%rdx)
; FALLBACK12-NEXT: movq %r15, 40(%rdx)
-; FALLBACK12-NEXT: movq %r10, 16(%rdx)
+; FALLBACK12-NEXT: movq %r9, 16(%rdx)
; FALLBACK12-NEXT: movq %r11, 24(%rdx)
; FALLBACK12-NEXT: movq %rdi, (%rdx)
; FALLBACK12-NEXT: addq $8, %rsp
@@ -13062,74 +13038,70 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: lshr_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
; FALLBACK14-NEXT: movl (%rsi), %esi
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rsi,8), %ecx
-; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
+; FALLBACK14-NEXT: andl $56, %eax
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andl $56, %esi
-; FALLBACK14-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r11
-; FALLBACK14-NEXT: movq -112(%rsp,%rsi), %rax
-; FALLBACK14-NEXT: movq -104(%rsp,%rsi), %rdi
-; FALLBACK14-NEXT: shrxq %rcx, %rdi, %r12
-; FALLBACK14-NEXT: movq -96(%rsp,%rsi), %r13
-; FALLBACK14-NEXT: shrxq %rcx, %rax, %r9
-; FALLBACK14-NEXT: movq -88(%rsp,%rsi), %r10
-; FALLBACK14-NEXT: shrxq %rcx, %r10, %r14
-; FALLBACK14-NEXT: shrxq %rcx, %r13, %r15
-; FALLBACK14-NEXT: movl %ecx, %ebx
-; FALLBACK14-NEXT: notb %bl
-; FALLBACK14-NEXT: movq -120(%rsp,%rsi), %rbp
-; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK14-NEXT: orq %r11, %r8
-; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -120(%rsp,%rsi), %r10
+; FALLBACK14-NEXT: movq -112(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r8, %rdi
+; FALLBACK14-NEXT: movq -104(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r11, %rbx
+; FALLBACK14-NEXT: movq -96(%rsp,%rsi), %r14
+; FALLBACK14-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rbx, %r8
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rbx
+; FALLBACK14-NEXT: addq %r11, %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rbx, %r11
+; FALLBACK14-NEXT: movq -88(%rsp,%rsi), %rbx
+; FALLBACK14-NEXT: shrxq %rcx, %rbx, %r15
; FALLBACK14-NEXT: movq -80(%rsp,%rsi), %r12
-; FALLBACK14-NEXT: shrxq %rcx, %r12, %r13
-; FALLBACK14-NEXT: shrxq %rcx, %rbp, %rbp
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK14-NEXT: shlxq %rax, %r13, %r13
+; FALLBACK14-NEXT: orq %r15, %r13
+; FALLBACK14-NEXT: shrxq %rcx, %r14, %r14
+; FALLBACK14-NEXT: addq %rbx, %rbx
+; FALLBACK14-NEXT: shlxq %rax, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r14, %rbx
+; FALLBACK14-NEXT: shrxq %rcx, %r12, %r14
; FALLBACK14-NEXT: movq -72(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r15
+; FALLBACK14-NEXT: shlxq %rax, %r15, %r15
+; FALLBACK14-NEXT: orq %r14, %r15
+; FALLBACK14-NEXT: shrxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
; FALLBACK14-NEXT: shrxq %rcx, %rsi, %rcx
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK14-NEXT: orq %r9, %rdi
-; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK14-NEXT: orq %r14, %r9
-; FALLBACK14-NEXT: addq %r10, %r10
-; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK14-NEXT: orq %r15, %r10
-; FALLBACK14-NEXT: addq %rsi, %rsi
-; FALLBACK14-NEXT: shlxq %rbx, %rsi, %rsi
-; FALLBACK14-NEXT: orq %r13, %rsi
-; FALLBACK14-NEXT: addq %rax, %rax
-; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK14-NEXT: orq %rbp, %rax
; FALLBACK14-NEXT: movq %rcx, 56(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rsi, 48(%rdx)
-; FALLBACK14-NEXT: movq %r10, 32(%rdx)
-; FALLBACK14-NEXT: movq %r9, 40(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r8, (%rdx)
-; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: movq %r15, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK14-NEXT: movq %r13, 40(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
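(The shrxq/leaq/shlxq/orq groups repeated through these hunks each stitch one output word from two adjacent source words. Expressed as scalar IR, a sketch of the pattern rather than part of the test file: doubling the high word first and shifting by the complement keeps every shift count at 63 or below, so the sequence stays well-defined even when the amount is zero.)

define i64 @stitch_word(i64 %lo, i64 %hi, i64 %amt) {
  ; out = (lo >> amt) | ((hi << 1) << (63 - amt)), i.e. hi:lo funnel-shifted
  ; right by amt (amt in [0,63]; the asm guarantees this via andl $56)
  %lo.sh = lshr i64 %lo, %amt
  %hi.x2 = shl i64 %hi, 1        ; leaq (%r,%r) in the generated code
  %namt = xor i64 %amt, 63       ; notb plus shlx's implicit 6-bit masking
  %hi.sh = shl i64 %hi.x2, %namt
  %out = or i64 %lo.sh, %hi.sh
  ret i64 %out
}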
@@ -13139,40 +13111,40 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK15-NEXT: pushq %r14
; FALLBACK15-NEXT: pushq %rbx
; FALLBACK15-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK15-NEXT: movl (%rsi), %eax
+; FALLBACK15-NEXT: movl (%rsi), %edi
; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK15-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK15-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: leal (,%rdi,8), %ecx
; FALLBACK15-NEXT: andl $56, %ecx
-; FALLBACK15-NEXT: andl $56, %eax
-; FALLBACK15-NEXT: movq -96(%rsp,%rax), %rdi
-; FALLBACK15-NEXT: movq -104(%rsp,%rax), %r9
-; FALLBACK15-NEXT: movq %r9, %rsi
-; FALLBACK15-NEXT: shrdq %cl, %rdi, %rsi
-; FALLBACK15-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK15-NEXT: andl $56, %edi
+; FALLBACK15-NEXT: movq -96(%rsp,%rdi), %rsi
+; FALLBACK15-NEXT: movq -104(%rsp,%rdi), %r9
+; FALLBACK15-NEXT: movq %r9, %rax
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %rax
+; FALLBACK15-NEXT: movq -112(%rsp,%rdi), %r10
; FALLBACK15-NEXT: movq %r10, %r8
; FALLBACK15-NEXT: shrdq %cl, %r9, %r8
-; FALLBACK15-NEXT: movq -80(%rsp,%rax), %r9
-; FALLBACK15-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK15-NEXT: movq -80(%rsp,%rdi), %r9
+; FALLBACK15-NEXT: movq -88(%rsp,%rdi), %r11
; FALLBACK15-NEXT: movq %r11, %rbx
; FALLBACK15-NEXT: shrdq %cl, %r9, %rbx
-; FALLBACK15-NEXT: shrdq %cl, %r11, %rdi
-; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK15-NEXT: shrdq %cl, %r11, %rsi
+; FALLBACK15-NEXT: movq -72(%rsp,%rdi), %r11
; FALLBACK15-NEXT: shrdq %cl, %r11, %r9
-; FALLBACK15-NEXT: movq -128(%rsp,%rax), %r14
-; FALLBACK15-NEXT: movq -120(%rsp,%rax), %rax
-; FALLBACK15-NEXT: movq %rax, %r15
+; FALLBACK15-NEXT: movq -128(%rsp,%rdi), %r14
+; FALLBACK15-NEXT: movq -120(%rsp,%rdi), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r15
; FALLBACK15-NEXT: shrdq %cl, %r10, %r15
; FALLBACK15-NEXT: shrxq %rcx, %r11, %r10
; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
-; FALLBACK15-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r14
; FALLBACK15-NEXT: movq %r15, 8(%rdx)
; FALLBACK15-NEXT: movq %r9, 48(%rdx)
-; FALLBACK15-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 32(%rdx)
; FALLBACK15-NEXT: movq %rbx, 40(%rdx)
; FALLBACK15-NEXT: movq %r8, 16(%rdx)
-; FALLBACK15-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
; FALLBACK15-NEXT: movq %r14, (%rdx)
; FALLBACK15-NEXT: movq %r10, 56(%rdx)
; FALLBACK15-NEXT: popq %rbx
@@ -13618,14 +13590,15 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 36(%eax), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 40(%eax), %ebp
-; FALLBACK18-NEXT: movl 44(%eax), %ebx
+; FALLBACK18-NEXT: movl 40(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 44(%eax), %ebp
; FALLBACK18-NEXT: movl 48(%eax), %edi
; FALLBACK18-NEXT: movl 52(%eax), %esi
; FALLBACK18-NEXT: movl 56(%eax), %edx
; FALLBACK18-NEXT: movl 60(%eax), %ecx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl (%eax), %eax
+; FALLBACK18-NEXT: movl (%eax), %ebx
; FALLBACK18-NEXT: xorps %xmm0, %xmm0
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
@@ -13634,136 +13607,138 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %eax, %ecx
-; FALLBACK18-NEXT: leal (,%eax,8), %edx
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: leal (,%ebx,8), %edx
; FALLBACK18-NEXT: andl $24, %edx
-; FALLBACK18-NEXT: andl $60, %ecx
-; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %edx, %ecx
+; FALLBACK18-NEXT: andl $60, %ebx
+; FALLBACK18-NEXT: movl 68(%esp,%ebx), %esi
+; FALLBACK18-NEXT: movl 72(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK18-NEXT: movl %edx, %ebx
-; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, 64(%esp,%ebx), %edi
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 80(%esp,%ebx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 76(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 88(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 96(%esp,%ebx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 92(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 104(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 100(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 112(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
-; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl %ecx, %edi
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: movl 108(%esp,%ebx), %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ecx, %ebp
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %ecx, %esi
-; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK18-NEXT: orl %edi, %ecx
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %esi, %ecx
+; FALLBACK18-NEXT: orl %eax, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 120(%esp,%ebx), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK18-NEXT: movl 116(%esp,%ebx), %eax
+; FALLBACK18-NEXT: movl %ebp, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebp
+; FALLBACK18-NEXT: orl %ebp, %esi
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %ecx, %ebp
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %edx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl 124(%esp,%ebx), %eax
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %edi
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %edx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebx, 56(%eax)
-; FALLBACK18-NEXT: movl %edi, 48(%eax)
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl %edi, 60(%eax)
+; FALLBACK18-NEXT: movl %edx, 56(%eax)
+; FALLBACK18-NEXT: movl %ecx, 48(%eax)
+; FALLBACK18-NEXT: movl %esi, 52(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 40(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK18-NEXT: movl %ecx, 44(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -14284,7 +14259,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
; FALLBACK22-NEXT: movups 32(%ecx), %xmm2
; FALLBACK22-NEXT: movups 48(%ecx), %xmm3
-; FALLBACK22-NEXT: movl (%eax), %ecx
+; FALLBACK22-NEXT: movl (%eax), %ebx
; FALLBACK22-NEXT: xorps %xmm4, %xmm4
; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
@@ -14294,112 +14269,114 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: leal (,%ecx,8), %edx
+; FALLBACK22-NEXT: leal (,%ebx,8), %edx
; FALLBACK22-NEXT: andl $24, %edx
-; FALLBACK22-NEXT: andl $60, %ecx
-; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: andl $60, %ebx
+; FALLBACK22-NEXT: movl 68(%esp,%ebx), %esi
+; FALLBACK22-NEXT: movl 72(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK22-NEXT: movl %edx, %ebx
-; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: notb %dl
; FALLBACK22-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
-; FALLBACK22-NEXT: orl %edi, %ebp
-; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ecx, 64(%esp,%ebx), %edi
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edi, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 80(%esp,%ebx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 76(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl 88(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 84(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl 96(%esp,%ebx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 92(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl 104(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 100(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: movl 112(%esp,%ecx), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: leal (%ecx,%ecx), %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %ecx
-; FALLBACK22-NEXT: movl 108(%esp,%eax), %esi
+; FALLBACK22-NEXT: movl 112(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 108(%esp,%ebx), %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ecx, %ebp
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl 120(%esp,%eax), %ebp
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: movl 116(%esp,%eax), %eax
-; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, %esi, %ecx
+; FALLBACK22-NEXT: orl %eax, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 120(%esp,%ebx), %edi
+; FALLBACK22-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 116(%esp,%ebx), %eax
+; FALLBACK22-NEXT: movl %ebp, %ecx
+; FALLBACK22-NEXT: shrxl %ebp, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %ebp
; FALLBACK22-NEXT: addl %eax, %eax
-; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %edx
-; FALLBACK22-NEXT: addl %ebp, %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 124(%esp,%ebx), %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK22-NEXT: shlxl %edx, %ebx, %edx
+; FALLBACK22-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK22-NEXT: orl %edi, %edx
+; FALLBACK22-NEXT: shrxl %ebp, %eax, %edi
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl %edx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebx, 56(%eax)
-; FALLBACK22-NEXT: movl %edi, 48(%eax)
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl %edi, 60(%eax)
+; FALLBACK22-NEXT: movl %edx, 56(%eax)
+; FALLBACK22-NEXT: movl %ecx, 48(%eax)
+; FALLBACK22-NEXT: movl %esi, 52(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 40(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: movl %ecx, 44(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -14873,109 +14850,107 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: leal (,%ecx,8), %edx
; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: andl $60, %ecx
; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK26-NEXT: movl 72(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK26-NEXT: movl %edx, %ebx
-; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: notb %dl
; FALLBACK26-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %ebp
; FALLBACK26-NEXT: orl %edi, %ebp
; FALLBACK26-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
; FALLBACK26-NEXT: orl %edi, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 120(%esp,%ecx), %ebp
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %eax, %ebp
+; FALLBACK26-NEXT: movl 120(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %edx, %eax, %esi
; FALLBACK26-NEXT: movl 116(%esp,%ecx), %eax
-; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %edi
; FALLBACK26-NEXT: orl %edi, %esi
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %eax, %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK26-NEXT: shlxl %edx, %eax, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl 124(%esp,%ecx), %ecx
-; FALLBACK26-NEXT: shrxl %edx, %ecx, %edx
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ebx
-; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edx
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %edi, %edx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %edi
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FALLBACK26-NEXT: movl %edx, 60(%ecx)
-; FALLBACK26-NEXT: movl %ebx, 56(%ecx)
-; FALLBACK26-NEXT: movl %edi, 48(%ecx)
+; FALLBACK26-NEXT: movl %edi, 60(%ecx)
+; FALLBACK26-NEXT: movl %edx, 56(%ecx)
+; FALLBACK26-NEXT: movl %eax, 48(%ecx)
; FALLBACK26-NEXT: movl %esi, 52(%ecx)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 40(%ecx)
+; FALLBACK26-NEXT: movl %ebp, 40(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 44(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -15430,115 +15405,113 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %zmm0
-; FALLBACK30-NEXT: movl (%eax), %edx
+; FALLBACK30-NEXT: movl (%eax), %ecx
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: leal (,%edx,8), %ecx
-; FALLBACK30-NEXT: andl $24, %ecx
-; FALLBACK30-NEXT: andl $60, %edx
-; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl 72(%esp,%edx), %eax
+; FALLBACK30-NEXT: leal (,%ecx,8), %edx
+; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: andl $60, %ecx
+; FALLBACK30-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl 72(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %edi
-; FALLBACK30-NEXT: movl %ecx, %ebx
-; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: notb %dl
; FALLBACK30-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %ebp
; FALLBACK30-NEXT: orl %edi, %ebp
; FALLBACK30-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, 64(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
; FALLBACK30-NEXT: orl %edi, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 76(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 88(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 84(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 96(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 92(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 104(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 100(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 112(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
-; FALLBACK30-NEXT: movl 108(%esp,%edx), %esi
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 120(%esp,%edx), %ebp
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %esi
-; FALLBACK30-NEXT: movl 116(%esp,%edx), %eax
-; FALLBACK30-NEXT: shrxl %ecx, %eax, %edi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %eax, %ebp
+; FALLBACK30-NEXT: movl 120(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %edx, %eax, %esi
+; FALLBACK30-NEXT: movl 116(%esp,%ecx), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %edi
; FALLBACK30-NEXT: orl %edi, %esi
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %eax, %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrxl %ecx, %ebp, %eax
-; FALLBACK30-NEXT: movl 124(%esp,%edx), %edx
-; FALLBACK30-NEXT: shrxl %ecx, %edx, %ebp
-; FALLBACK30-NEXT: leal (%edx,%edx), %ecx
-; FALLBACK30-NEXT: shlxl %ebx, %ecx, %edx
-; FALLBACK30-NEXT: orl %eax, %edx
+; FALLBACK30-NEXT: shlxl %edx, %eax, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
+; FALLBACK30-NEXT: movl 124(%esp,%ecx), %ecx
+; FALLBACK30-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edx
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %edi, %edx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %edi
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FALLBACK30-NEXT: movl %ebp, 60(%ecx)
+; FALLBACK30-NEXT: movl %edi, 60(%ecx)
; FALLBACK30-NEXT: movl %edx, 56(%ecx)
-; FALLBACK30-NEXT: movl %edi, 48(%ecx)
+; FALLBACK30-NEXT: movl %eax, 48(%ecx)
; FALLBACK30-NEXT: movl %esi, 52(%ecx)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 40(%ecx)
+; FALLBACK30-NEXT: movl %ebp, 40(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 44(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -16196,10 +16169,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: shl_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -16227,62 +16198,60 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rsi,8), %eax
; FALLBACK2-NEXT: andl $56, %eax
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andl $56, %esi
; FALLBACK2-NEXT: negl %esi
; FALLBACK2-NEXT: movslq %esi, %rsi
-; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %r10
-; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r9
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %r14
-; FALLBACK2-NEXT: shlxq %rax, %r14, %rbx
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %r8
-; FALLBACK2-NEXT: shlxq %rax, %r8, %r15
-; FALLBACK2-NEXT: shlxq %rax, %r10, %r12
-; FALLBACK2-NEXT: movl %eax, %r13d
-; FALLBACK2-NEXT: notb %r13b
-; FALLBACK2-NEXT: shrq %r10
-; FALLBACK2-NEXT: shrxq %r13, %r10, %r10
-; FALLBACK2-NEXT: orq %r9, %r10
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: shlxq %rax, %r9, %rbp
-; FALLBACK2-NEXT: shrq %r14
-; FALLBACK2-NEXT: shrxq %r13, %r14, %r14
-; FALLBACK2-NEXT: orq %r11, %r14
-; FALLBACK2-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
-; FALLBACK2-NEXT: movq -16(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rax
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %r13, %rcx, %rcx
-; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %r9
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %r8
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r10
; FALLBACK2-NEXT: shrq %r9
-; FALLBACK2-NEXT: shrxq %r13, %r9, %r9
-; FALLBACK2-NEXT: orq %r15, %r9
+; FALLBACK2-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK2-NEXT: orq %r8, %r9
+; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %rbx
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r14
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %rbx, %r8
; FALLBACK2-NEXT: shrq %rdi
-; FALLBACK2-NEXT: shrxq %r13, %rdi, %rdi
-; FALLBACK2-NEXT: orq %rbp, %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r14, %rdi
+; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rbx
+; FALLBACK2-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %r15
+; FALLBACK2-NEXT: shlxq %rcx, %r15, %r12
+; FALLBACK2-NEXT: shrq %r15
+; FALLBACK2-NEXT: shrxq %rax, %r15, %r15
+; FALLBACK2-NEXT: orq %r14, %r15
+; FALLBACK2-NEXT: shrq %r11
+; FALLBACK2-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK2-NEXT: orq %r12, %r11
+; FALLBACK2-NEXT: shlxq %rcx, -8(%rsp,%rsi), %r14
+; FALLBACK2-NEXT: movq -16(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %rcx
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %r13, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: shrq %r8
-; FALLBACK2-NEXT: shrxq %r13, %r8, %r8
-; FALLBACK2-NEXT: orq %rax, %r8
-; FALLBACK2-NEXT: movq %r12, (%rdx)
-; FALLBACK2-NEXT: movq %r8, 48(%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r14, %rsi
+; FALLBACK2-NEXT: shrq %rbx
+; FALLBACK2-NEXT: shrxq %rax, %rbx, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
+; FALLBACK2-NEXT: movq %rax, 48(%rdx)
; FALLBACK2-NEXT: movq %rsi, 56(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 32(%rdx)
-; FALLBACK2-NEXT: movq %r9, 40(%rdx)
-; FALLBACK2-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK2-NEXT: movq %r14, 24(%rdx)
-; FALLBACK2-NEXT: movq %r10, 8(%rdx)
+; FALLBACK2-NEXT: movq %r11, 32(%rdx)
+; FALLBACK2-NEXT: movq %r15, 40(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK2-NEXT: movq %r8, 24(%rdx)
+; FALLBACK2-NEXT: movq %r9, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
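(shl_64bytes mirrors the lshr pattern: the word-granular part of the offset is handled by indexing the on-stack buffer at a negated offset, visible in the negl/movslq above, and the sub-word bits come from the same complement trick with the roles of the shifts swapped, shrq on the low word and shlxq on the high. A sketch of the assumed IR body, by analogy with lshr_64bytes and equally outside this diff:)

define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
  %src = load i512, ptr %src.ptr, align 1
  %byteOff = load i512, ptr %byteOff.ptr, align 1
  %bitOff = shl i512 %byteOff, 3
  %res = shl i512 %src, %bitOff
  store i512 %res, ptr %dst, align 1
  ret void
}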
; FALLBACK3-LABEL: shl_64bytes:
@@ -16509,86 +16478,81 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: shl_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
-; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: subq $24, %rsp
+; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
; FALLBACK6-NEXT: movups 48(%rdi), %xmm3
-; FALLBACK6-NEXT: movl (%rsi), %eax
+; FALLBACK6-NEXT: movl (%rsi), %esi
; FALLBACK6-NEXT: xorps %xmm4, %xmm4
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: movaps %xmm3, (%rsp)
+; FALLBACK6-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %ecx
-; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: leal (,%rsi,8), %eax
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: negl %eax
-; FALLBACK6-NEXT: movslq %eax, %rsi
-; FALLBACK6-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK6-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK6-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK6-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK6-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK6-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK6-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK6-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK6-NEXT: movl %ecx, %r9d
-; FALLBACK6-NEXT: notb %r9b
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: negl %esi
+; FALLBACK6-NEXT: movslq %esi, %rsi
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %r9, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK6-NEXT: shrq %r9
+; FALLBACK6-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK6-NEXT: orq %r10, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK6-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK6-NEXT: shrq %r10
+; FALLBACK6-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %r11, %r10
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK6-NEXT: shrq %r11
+; FALLBACK6-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %r14, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK6-NEXT: shrq %rbx
+; FALLBACK6-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r15, %rbx
+; FALLBACK6-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK6-NEXT: orq %r12, %rdi
-; FALLBACK6-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK6-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK6-NEXT: shrq %r13
-; FALLBACK6-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK6-NEXT: orq %r15, %r12
-; FALLBACK6-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK6-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK6-NEXT: shrq %r11
-; FALLBACK6-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK6-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK6-NEXT: shrq %r14
-; FALLBACK6-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK6-NEXT: orq %r10, %r14
-; FALLBACK6-NEXT: shrq %rsi
-; FALLBACK6-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK6-NEXT: orq %rbx, %rsi
-; FALLBACK6-NEXT: shrq %rax
-; FALLBACK6-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK6-NEXT: orq %r8, %rax
-; FALLBACK6-NEXT: shrq %rbp
-; FALLBACK6-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK6-NEXT: orq %r15, %r8
-; FALLBACK6-NEXT: movq %rcx, (%rdx)
-; FALLBACK6-NEXT: movq %r8, 56(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK6-NEXT: movq %r14, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r12, 32(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK6-NEXT: addq $24, %rsp
+; FALLBACK6-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK6-NEXT: shrq %r15
+; FALLBACK6-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK6-NEXT: orq %rcx, %rax
+; FALLBACK6-NEXT: movq %r14, (%rdx)
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, 24(%rdx)
+; FALLBACK6-NEXT: movq %r9, 32(%rdx)
+; FALLBACK6-NEXT: movq %r8, 40(%rdx)
+; FALLBACK6-NEXT: addq $8, %rsp
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
-; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_64bytes:
@@ -16798,80 +16762,75 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: shl_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
-; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: subq $24, %rsp
+; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: movl (%rsi), %esi
; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %ecx
-; FALLBACK10-NEXT: andl $56, %ecx
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: negl %eax
-; FALLBACK10-NEXT: movslq %eax, %rsi
-; FALLBACK10-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK10-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK10-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK10-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK10-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK10-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK10-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK10-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK10-NEXT: movl %ecx, %r9d
-; FALLBACK10-NEXT: notb %r9b
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: negl %esi
+; FALLBACK10-NEXT: movslq %esi, %rsi
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %r9, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK10-NEXT: shrq %r9
+; FALLBACK10-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK10-NEXT: orq %r10, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK10-NEXT: shrq %r10
+; FALLBACK10-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %r11, %r10
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK10-NEXT: shrq %r11
+; FALLBACK10-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %r14, %r11
+; FALLBACK10-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK10-NEXT: shrq %rbx
+; FALLBACK10-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r15, %rbx
+; FALLBACK10-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK10-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK10-NEXT: orq %r12, %rdi
-; FALLBACK10-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK10-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK10-NEXT: shrq %r13
-; FALLBACK10-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK10-NEXT: orq %r15, %r12
-; FALLBACK10-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK10-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK10-NEXT: shrq %r11
-; FALLBACK10-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK10-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK10-NEXT: shrq %r14
-; FALLBACK10-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK10-NEXT: orq %r10, %r14
-; FALLBACK10-NEXT: shrq %rsi
-; FALLBACK10-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK10-NEXT: orq %rbx, %rsi
-; FALLBACK10-NEXT: shrq %rax
-; FALLBACK10-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK10-NEXT: orq %r8, %rax
-; FALLBACK10-NEXT: shrq %rbp
-; FALLBACK10-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK10-NEXT: orq %r15, %r8
-; FALLBACK10-NEXT: movq %rcx, (%rdx)
-; FALLBACK10-NEXT: movq %r8, 56(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK10-NEXT: movq %r14, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r12, 32(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK10-NEXT: addq $24, %rsp
+; FALLBACK10-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK10-NEXT: shrq %r15
+; FALLBACK10-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK10-NEXT: orq %rcx, %rax
+; FALLBACK10-NEXT: movq %r14, (%rdx)
+; FALLBACK10-NEXT: movq %rax, 56(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, 24(%rdx)
+; FALLBACK10-NEXT: movq %r9, 32(%rdx)
+; FALLBACK10-NEXT: movq %r8, 40(%rdx)
+; FALLBACK10-NEXT: addq $8, %rsp
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
-; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -17071,77 +17030,72 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: shl_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
-; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: subq $24, %rsp
+; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK14-NEXT: movl (%rsi), %eax
+; FALLBACK14-NEXT: movl (%rsi), %esi
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rax,8), %ecx
-; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
; FALLBACK14-NEXT: andl $56, %eax
-; FALLBACK14-NEXT: negl %eax
-; FALLBACK14-NEXT: movslq %eax, %rsi
-; FALLBACK14-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK14-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK14-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK14-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK14-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK14-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK14-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK14-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK14-NEXT: movl %ecx, %r9d
-; FALLBACK14-NEXT: notb %r9b
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: negl %esi
+; FALLBACK14-NEXT: movslq %esi, %rsi
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %r9, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK14-NEXT: shrq %r9
+; FALLBACK14-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK14-NEXT: orq %r10, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK14-NEXT: shrq %r10
+; FALLBACK14-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %r11, %r10
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK14-NEXT: shrq %r11
+; FALLBACK14-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %r14, %r11
+; FALLBACK14-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK14-NEXT: shrq %rbx
+; FALLBACK14-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r15, %rbx
+; FALLBACK14-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK14-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK14-NEXT: orq %r12, %rdi
-; FALLBACK14-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK14-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK14-NEXT: shrq %r13
-; FALLBACK14-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK14-NEXT: orq %r15, %r12
-; FALLBACK14-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK14-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK14-NEXT: shrq %r11
-; FALLBACK14-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK14-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK14-NEXT: shrq %r14
-; FALLBACK14-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK14-NEXT: orq %r10, %r14
-; FALLBACK14-NEXT: shrq %rsi
-; FALLBACK14-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK14-NEXT: orq %rbx, %rsi
-; FALLBACK14-NEXT: shrq %rax
-; FALLBACK14-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK14-NEXT: orq %r8, %rax
-; FALLBACK14-NEXT: shrq %rbp
-; FALLBACK14-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK14-NEXT: orq %r15, %r8
-; FALLBACK14-NEXT: movq %rcx, (%rdx)
-; FALLBACK14-NEXT: movq %r8, 56(%rdx)
-; FALLBACK14-NEXT: movq %rax, 48(%rdx)
-; FALLBACK14-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK14-NEXT: movq %r14, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r12, 32(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK14-NEXT: addq $24, %rsp
+; FALLBACK14-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK14-NEXT: shrq %r15
+; FALLBACK14-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK14-NEXT: orq %rcx, %rax
+; FALLBACK14-NEXT: movq %r14, (%rdx)
+; FALLBACK14-NEXT: movq %rax, 56(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, 24(%rdx)
+; FALLBACK14-NEXT: movq %r9, 32(%rdx)
+; FALLBACK14-NEXT: movq %r8, 40(%rdx)
+; FALLBACK14-NEXT: addq $8, %rsp
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
-; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -17681,144 +17635,149 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: leal (,%ebp,8), %edx
-; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: leal (,%ebp,8), %ebx
+; FALLBACK18-NEXT: andl $24, %ebx
+; FALLBACK18-NEXT: movl %ebx, %eax
; FALLBACK18-NEXT: andl $60, %ebp
; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK18-NEXT: subl %ebp, %edi
-; FALLBACK18-NEXT: movl (%edi), %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 4(%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %edx, %ebx
+; FALLBACK18-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK18-NEXT: subl %ebp, %edx
+; FALLBACK18-NEXT: movl (%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%edx), %ecx
; FALLBACK18-NEXT: notb %bl
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK18-NEXT: orl %ecx, %esi
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK18-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: orl %esi, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%edx), %esi
; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 8(%edi), %esi
-; FALLBACK18-NEXT: movl %esi, %ecx
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 12(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: orl %esi, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 16(%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: movl 20(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 12(%edx), %esi
+; FALLBACK18-NEXT: movl %ebp, %edi
+; FALLBACK18-NEXT: shlxl %ebp, %esi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK18-NEXT: orl %eax, %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 24(%edi), %ecx
+; FALLBACK18-NEXT: movl 16(%edx), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 28(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: movl 20(%edx), %ecx
+; FALLBACK18-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %eax, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 32(%edi), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: movl 36(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: movl 24(%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 28(%edx), %esi
+; FALLBACK18-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: orl %eax, %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 40(%edi), %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%edx), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 44(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: movl 36(%edx), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %edi, %eax
; FALLBACK18-NEXT: shrl %esi
; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %eax, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%edi), %esi
+; FALLBACK18-NEXT: orl %ebp, %esi
; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%edx), %edi
+; FALLBACK18-NEXT: movl %edi, %esi
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK18-NEXT: movl 52(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK18-NEXT: movl 44(%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK18-NEXT: movl %eax, %esi
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK18-NEXT: orl %eax, %ebp
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl 48(%edx), %ebp
+; FALLBACK18-NEXT: movl %ebp, %edi
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 52(%edx), %ecx
+; FALLBACK18-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK18-NEXT: movl %esi, %ebp
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: negl %eax
-; FALLBACK18-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK18-NEXT: movl 56(%edi), %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %edx, %esi
; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: orl %eax, %ecx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, (%eax)
-; FALLBACK18-NEXT: movl %esi, 56(%eax)
-; FALLBACK18-NEXT: movl %ecx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebp, 48(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 40(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 44(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 32(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 36(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 24(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 28(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 16(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK18-NEXT: orl %edi, %esi
+; FALLBACK18-NEXT: movl 56(%edx), %edi
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK18-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK18-NEXT: negl %ebx
+; FALLBACK18-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK18-NEXT: orl %ecx, %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK18-NEXT: movl %edi, (%edx)
+; FALLBACK18-NEXT: movl %eax, 56(%edx)
+; FALLBACK18-NEXT: movl %ebx, 60(%edx)
+; FALLBACK18-NEXT: movl %esi, 48(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 52(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 40(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 44(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 32(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 36(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 24(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 28(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 16(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 20(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edx)
; FALLBACK18-NEXT: addl $204, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -18342,144 +18301,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: leal (,%eax,8), %edx
-; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: leal (,%eax,8), %ebx
+; FALLBACK22-NEXT: andl $24, %ebx
+; FALLBACK22-NEXT: movl %ebx, %ecx
; FALLBACK22-NEXT: andl $60, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK22-NEXT: subl %eax, %edi
-; FALLBACK22-NEXT: movl (%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 4(%edi), %eax
+; FALLBACK22-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: subl %eax, %edx
+; FALLBACK22-NEXT: movl (%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 4(%edx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %edx, %ebx
; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 8(%edx), %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 8(%edi), %esi
-; FALLBACK22-NEXT: movl %esi, %ecx
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 12(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %esi, %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 16(%edi), %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: movl 20(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 12(%edx), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ecx, %edi
+; FALLBACK22-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK22-NEXT: orl %eax, %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 24(%edi), %ecx
+; FALLBACK22-NEXT: movl 16(%edx), %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 28(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: movl 20(%edx), %ecx
+; FALLBACK22-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %eax, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 32(%edi), %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: movl 36(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: movl 24(%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 28(%edx), %esi
+; FALLBACK22-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %eax, %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 40(%edi), %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 32(%edx), %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 44(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: movl 36(%edx), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, %eax
; FALLBACK22-NEXT: shrl %esi
; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %eax, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%edi), %esi
+; FALLBACK22-NEXT: orl %ebp, %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 40(%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %esi
; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK22-NEXT: movl 52(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK22-NEXT: movl 44(%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK22-NEXT: movl %eax, %esi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK22-NEXT: orl %eax, %ebp
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 48(%edx), %ebp
+; FALLBACK22-NEXT: movl %ebp, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 52(%edx), %ecx
+; FALLBACK22-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK22-NEXT: movl %esi, %ebp
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: negl %eax
-; FALLBACK22-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK22-NEXT: movl 56(%edi), %eax
-; FALLBACK22-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edx, %esi
; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %eax, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK22-NEXT: movl %edx, (%eax)
-; FALLBACK22-NEXT: movl %esi, 56(%eax)
-; FALLBACK22-NEXT: movl %ecx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebp, 48(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 40(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 44(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 32(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 36(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 24(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 28(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 16(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 20(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 8(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 12(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 4(%eax)
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl 56(%edx), %edi
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK22-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK22-NEXT: negl %ebx
+; FALLBACK22-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK22-NEXT: orl %ecx, %ebx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %edi, (%edx)
+; FALLBACK22-NEXT: movl %eax, 56(%edx)
+; FALLBACK22-NEXT: movl %ebx, 60(%edx)
+; FALLBACK22-NEXT: movl %esi, 48(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 52(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 40(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 44(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 32(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 36(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 24(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 28(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 16(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 12(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
; FALLBACK22-NEXT: addl $204, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -18943,144 +18908,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: leal (,%eax,8), %edx
-; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: leal (,%eax,8), %ebx
+; FALLBACK26-NEXT: andl $24, %ebx
+; FALLBACK26-NEXT: movl %ebx, %ecx
; FALLBACK26-NEXT: andl $60, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK26-NEXT: subl %eax, %edi
-; FALLBACK26-NEXT: movl (%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 4(%edi), %eax
+; FALLBACK26-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: subl %eax, %edx
+; FALLBACK26-NEXT: movl (%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 4(%edx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK26-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 8(%edx), %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 8(%edi), %esi
-; FALLBACK26-NEXT: movl %esi, %ecx
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 12(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %esi, %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 16(%edi), %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: movl 20(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 12(%edx), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %ecx, %edi
+; FALLBACK26-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK26-NEXT: orl %eax, %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 24(%edi), %ecx
+; FALLBACK26-NEXT: movl 16(%edx), %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 28(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: movl 20(%edx), %ecx
+; FALLBACK26-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 32(%edi), %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: movl 36(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: movl 24(%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 28(%edx), %esi
+; FALLBACK26-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %eax, %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 40(%edi), %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 32(%edx), %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 44(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: movl 36(%edx), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, %eax
; FALLBACK26-NEXT: shrl %esi
; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%edi), %esi
+; FALLBACK26-NEXT: orl %ebp, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 40(%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %esi
; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK26-NEXT: movl 52(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK26-NEXT: movl 44(%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK26-NEXT: movl %eax, %esi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK26-NEXT: orl %eax, %ebp
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl 48(%edx), %ebp
+; FALLBACK26-NEXT: movl %ebp, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 52(%edx), %ecx
+; FALLBACK26-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK26-NEXT: movl %esi, %ebp
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: negl %eax
-; FALLBACK26-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK26-NEXT: movl 56(%edi), %eax
-; FALLBACK26-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %edx, %esi
; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %eax, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK26-NEXT: movl %edx, (%eax)
-; FALLBACK26-NEXT: movl %esi, 56(%eax)
-; FALLBACK26-NEXT: movl %ecx, 60(%eax)
-; FALLBACK26-NEXT: movl %ebp, 48(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 52(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 40(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 44(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 32(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 36(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 24(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 28(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 16(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 20(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 8(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 12(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 4(%eax)
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl 56(%edx), %edi
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK26-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK26-NEXT: negl %ebx
+; FALLBACK26-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK26-NEXT: orl %ecx, %ebx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %edi, (%edx)
+; FALLBACK26-NEXT: movl %eax, 56(%edx)
+; FALLBACK26-NEXT: movl %ebx, 60(%edx)
+; FALLBACK26-NEXT: movl %esi, 48(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 52(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 40(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 44(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 32(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 36(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 24(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 28(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 16(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 8(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
; FALLBACK26-NEXT: addl $204, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -19531,144 +19502,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: leal (,%eax,8), %edx
-; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: leal (,%eax,8), %ebx
+; FALLBACK30-NEXT: andl $24, %ebx
+; FALLBACK30-NEXT: movl %ebx, %ecx
; FALLBACK30-NEXT: andl $60, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK30-NEXT: subl %eax, %edi
-; FALLBACK30-NEXT: movl (%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 4(%edi), %eax
+; FALLBACK30-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: subl %eax, %edx
+; FALLBACK30-NEXT: movl (%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 4(%edx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %edx, %ebx
; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK30-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 8(%edx), %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 8(%edi), %esi
-; FALLBACK30-NEXT: movl %esi, %ecx
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 12(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %esi, %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 16(%edi), %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: movl 20(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 12(%edx), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %ecx, %edi
+; FALLBACK30-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK30-NEXT: orl %eax, %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 24(%edi), %ecx
+; FALLBACK30-NEXT: movl 16(%edx), %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 28(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: movl 20(%edx), %ecx
+; FALLBACK30-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 32(%edi), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: movl 36(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: movl 24(%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 28(%edx), %esi
+; FALLBACK30-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %eax, %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 40(%edi), %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 32(%edx), %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 44(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: movl 36(%edx), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, %eax
; FALLBACK30-NEXT: shrl %esi
; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%edi), %esi
+; FALLBACK30-NEXT: orl %ebp, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 40(%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %esi
; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK30-NEXT: movl 52(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK30-NEXT: movl 44(%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK30-NEXT: movl %eax, %esi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK30-NEXT: orl %eax, %ebp
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl 48(%edx), %ebp
+; FALLBACK30-NEXT: movl %ebp, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 52(%edx), %ecx
+; FALLBACK30-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK30-NEXT: movl %esi, %ebp
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: negl %eax
-; FALLBACK30-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK30-NEXT: movl 56(%edi), %eax
-; FALLBACK30-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %edx, %esi
; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %eax, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK30-NEXT: movl %edx, (%eax)
-; FALLBACK30-NEXT: movl %esi, 56(%eax)
-; FALLBACK30-NEXT: movl %ecx, 60(%eax)
-; FALLBACK30-NEXT: movl %ebp, 48(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 52(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 40(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 44(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 32(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 36(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 24(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 28(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 16(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 20(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 8(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 12(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 4(%eax)
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl 56(%edx), %edi
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK30-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK30-NEXT: negl %ebx
+; FALLBACK30-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK30-NEXT: orl %ecx, %ebx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %edi, (%edx)
+; FALLBACK30-NEXT: movl %eax, 56(%edx)
+; FALLBACK30-NEXT: movl %ebx, 60(%edx)
+; FALLBACK30-NEXT: movl %esi, 48(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 52(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 40(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 44(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 32(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 36(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 24(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 28(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 16(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 8(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
; FALLBACK30-NEXT: addl $204, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -20336,10 +20313,8 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: ashr_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -20371,60 +20346,58 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rax,8), %ecx
; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: movl %ecx, %esi
; FALLBACK2-NEXT: andl $56, %eax
-; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
-; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
-; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
-; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
-; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
-; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
-; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
-; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
-; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
-; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
-; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
-; FALLBACK2-NEXT: movl %ecx, %r12d
-; FALLBACK2-NEXT: notb %r12b
-; FALLBACK2-NEXT: addq %r9, %r9
-; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %r8
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r8, %r9
+; FALLBACK2-NEXT: notb %cl
+; FALLBACK2-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rsi, -128(%rsp,%rax), %r9
+; FALLBACK2-NEXT: addq %r8, %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK2-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK2-NEXT: leaq (%r14,%r14), %r9
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r9
; FALLBACK2-NEXT: orq %rbx, %r9
-; FALLBACK2-NEXT: addq %rdi, %rdi
-; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r13, %rdi
-; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
-; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
-; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK2-NEXT: sarxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK2-NEXT: addq %r11, %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK2-NEXT: orq %r10, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %rbx
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %r15
+; FALLBACK2-NEXT: leaq (%r15,%r15), %r12
+; FALLBACK2-NEXT: shlxq %rcx, %r12, %r12
+; FALLBACK2-NEXT: orq %rbx, %r12
+; FALLBACK2-NEXT: shrxq %rsi, %r14, %rbx
; FALLBACK2-NEXT: addq %r10, %r10
-; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
-; FALLBACK2-NEXT: orq %r8, %r10
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
-; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
-; FALLBACK2-NEXT: orq %r15, %r8
-; FALLBACK2-NEXT: addq %r14, %r14
-; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
-; FALLBACK2-NEXT: orq %rbp, %r11
-; FALLBACK2-NEXT: addq %rax, %rax
-; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
-; FALLBACK2-NEXT: orq %r13, %rax
-; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
-; FALLBACK2-NEXT: movq %rax, 48(%rdx)
-; FALLBACK2-NEXT: movq %r11, 32(%rdx)
-; FALLBACK2-NEXT: movq %r8, 40(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
-; FALLBACK2-NEXT: movq %r10, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, (%rdx)
-; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK2-NEXT: orq %rbx, %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r15, %rbx
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: leaq (%rax,%rax), %r14
+; FALLBACK2-NEXT: shlxq %rcx, %r14, %rcx
+; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK2-NEXT: movq %rax, 56(%rdx)
+; FALLBACK2-NEXT: movq %rcx, 48(%rdx)
+; FALLBACK2-NEXT: movq %r10, 32(%rdx)
+; FALLBACK2-NEXT: movq %r12, 40(%rdx)
+; FALLBACK2-NEXT: movq %r11, 16(%rdx)
+; FALLBACK2-NEXT: movq %r9, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_64bytes:
@@ -20664,13 +20637,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: ashr_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
@@ -20691,62 +20662,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %esi
-; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: leal (,%rax,8), %ecx
+; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: movl %ecx, %esi
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK6-NEXT: movl %esi, %ebx
-; FALLBACK6-NEXT: notb %bl
-; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK6-NEXT: orq %r11, %r8
-; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK6-NEXT: notb %cl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK6-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r8, %rdi
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK6-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK6-NEXT: orq %rbx, %r8
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK6-NEXT: addq %r11, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK6-NEXT: orq %rbx, %r11
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK6-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK6-NEXT: orq %r15, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK6-NEXT: addq %rbx, %rbx
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r14, %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK6-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK6-NEXT: orq %r9, %rdi
-; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK6-NEXT: orq %r14, %r9
-; FALLBACK6-NEXT: addq %r10, %r10
-; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK6-NEXT: orq %r15, %r10
-; FALLBACK6-NEXT: addq %rax, %rax
-; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK6-NEXT: orq %r13, %rax
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK6-NEXT: orq %rbp, %rcx
-; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK6-NEXT: orq %r14, %r15
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK6-NEXT: orq %r10, %rcx
+; FALLBACK6-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %r10, 32(%rdx)
-; FALLBACK6-NEXT: movq %r9, 40(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r8, (%rdx)
-; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: movq %r15, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK6-NEXT: movq %r13, 40(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_64bytes:
@@ -20979,13 +20948,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: ashr_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %xmm1
; FALLBACK10-NEXT: movq 48(%rdi), %rcx
@@ -21004,62 +20971,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %esi
-; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: leal (,%rax,8), %ecx
+; FALLBACK10-NEXT: andl $56, %ecx
+; FALLBACK10-NEXT: movl %ecx, %esi
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK10-NEXT: movl %esi, %ebx
-; FALLBACK10-NEXT: notb %bl
-; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK10-NEXT: orq %r11, %r8
-; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK10-NEXT: orq %r12, %r11
+; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK10-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK10-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r8, %rdi
+; FALLBACK10-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK10-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK10-NEXT: orq %rbx, %r8
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK10-NEXT: addq %r11, %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rbx, %r11
+; FALLBACK10-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK10-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK10-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK10-NEXT: orq %r15, %r13
+; FALLBACK10-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK10-NEXT: addq %rbx, %rbx
+; FALLBACK10-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r14, %rbx
+; FALLBACK10-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK10-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK10-NEXT: orq %r9, %rdi
-; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK10-NEXT: orq %r14, %r9
-; FALLBACK10-NEXT: addq %r10, %r10
-; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK10-NEXT: orq %r15, %r10
-; FALLBACK10-NEXT: addq %rax, %rax
-; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK10-NEXT: orq %r13, %rax
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK10-NEXT: orq %rbp, %rcx
-; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK10-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK10-NEXT: orq %r14, %r15
+; FALLBACK10-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %r10, %rcx
+; FALLBACK10-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 56(%rdx)
; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %r10, 32(%rdx)
-; FALLBACK10-NEXT: movq %r9, 40(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r8, (%rdx)
-; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: movq %r15, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK10-NEXT: movq %r13, 40(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -21292,13 +21257,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: ashr_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
; FALLBACK14-NEXT: vmovups 32(%rdi), %xmm1
; FALLBACK14-NEXT: movq 48(%rdi), %rcx
@@ -21317,62 +21280,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rax,8), %esi
-; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: leal (,%rax,8), %ecx
+; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: movl %ecx, %esi
; FALLBACK14-NEXT: andl $56, %eax
-; FALLBACK14-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK14-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK14-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK14-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK14-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK14-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK14-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK14-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK14-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK14-NEXT: movl %esi, %ebx
-; FALLBACK14-NEXT: notb %bl
-; FALLBACK14-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK14-NEXT: orq %r11, %r8
-; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK14-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK14-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r8, %rdi
+; FALLBACK14-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK14-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK14-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK14-NEXT: orq %rbx, %r8
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK14-NEXT: addq %r11, %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rbx, %r11
+; FALLBACK14-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK14-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK14-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK14-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK14-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK14-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK14-NEXT: orq %r15, %r13
+; FALLBACK14-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK14-NEXT: addq %rbx, %rbx
+; FALLBACK14-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r14, %rbx
+; FALLBACK14-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK14-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK14-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK14-NEXT: orq %r9, %rdi
-; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK14-NEXT: orq %r14, %r9
-; FALLBACK14-NEXT: addq %r10, %r10
-; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK14-NEXT: orq %r15, %r10
-; FALLBACK14-NEXT: addq %rax, %rax
-; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK14-NEXT: orq %r13, %rax
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK14-NEXT: orq %rbp, %rcx
-; FALLBACK14-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK14-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK14-NEXT: orq %r14, %r15
+; FALLBACK14-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %r10, %rcx
+; FALLBACK14-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 56(%rdx)
; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK14-NEXT: movq %rax, 48(%rdx)
-; FALLBACK14-NEXT: movq %r10, 32(%rdx)
-; FALLBACK14-NEXT: movq %r9, 40(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r8, (%rdx)
-; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: movq %r15, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK14-NEXT: movq %r13, 40(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -21960,111 +21921,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, %ecx
; FALLBACK18-NEXT: leal (,%eax,8), %edx
; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: movl %edx, %ebx
; FALLBACK18-NEXT: andl $60, %ecx
; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK18-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %edx, %ebx
-; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %ecx, %ebp
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl %ecx, %edi
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %ecx, %esi
-; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK18-NEXT: orl %edi, %ecx
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK18-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK18-NEXT: orl %ebp, %esi
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK18-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK18-NEXT: orl %ebp, %ecx
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %edx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebx, 56(%eax)
-; FALLBACK18-NEXT: movl %edi, 48(%eax)
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl %edi, 60(%eax)
+; FALLBACK18-NEXT: movl %edx, 56(%eax)
+; FALLBACK18-NEXT: movl %ecx, 48(%eax)
+; FALLBACK18-NEXT: movl %esi, 52(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 40(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK18-NEXT: movl %ecx, 44(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -22664,111 +22626,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl %eax, %ecx
; FALLBACK22-NEXT: leal (,%eax,8), %edx
; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: movl %edx, %ebx
; FALLBACK22-NEXT: andl $60, %ecx
; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK22-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %edx, %ebx
-; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: notb %dl
; FALLBACK22-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %ebp
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
; FALLBACK22-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl %ecx, %edi
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK22-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %eax, %eax
-; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK22-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK22-NEXT: addl %ebp, %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %edi, %edx
+; FALLBACK22-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl %edx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebx, 56(%eax)
-; FALLBACK22-NEXT: movl %edi, 48(%eax)
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl %edi, 60(%eax)
+; FALLBACK22-NEXT: movl %edx, 56(%eax)
+; FALLBACK22-NEXT: movl %ecx, 48(%eax)
+; FALLBACK22-NEXT: movl %esi, 52(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 40(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: movl %ecx, 44(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -23326,111 +23289,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl %eax, %ecx
; FALLBACK26-NEXT: leal (,%eax,8), %edx
; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: andl $60, %ecx
; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK26-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %edx, %ebx
-; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: notb %dl
; FALLBACK26-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl %ecx, %ebp
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl %ecx, %edi
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK26-NEXT: orl %edi, %ecx
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK26-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK26-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK26-NEXT: orl %ebp, %esi
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %eax, %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK26-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK26-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK26-NEXT: addl %ebp, %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK26-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %edi, %edx
+; FALLBACK26-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK26-NEXT: movl %edx, 60(%eax)
-; FALLBACK26-NEXT: movl %ebx, 56(%eax)
-; FALLBACK26-NEXT: movl %edi, 48(%eax)
-; FALLBACK26-NEXT: movl %ecx, 52(%eax)
-; FALLBACK26-NEXT: movl %esi, 40(%eax)
+; FALLBACK26-NEXT: movl %edi, 60(%eax)
+; FALLBACK26-NEXT: movl %edx, 56(%eax)
+; FALLBACK26-NEXT: movl %ecx, 48(%eax)
+; FALLBACK26-NEXT: movl %esi, 52(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 40(%eax)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK26-NEXT: movl %ecx, 44(%eax)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -23988,111 +23952,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl %eax, %ecx
; FALLBACK30-NEXT: leal (,%eax,8), %edx
; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
; FALLBACK30-NEXT: andl $60, %ecx
; FALLBACK30-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK30-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %edx, %ebx
-; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: notb %dl
; FALLBACK30-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl %ecx, %ebp
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
; FALLBACK30-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl %ecx, %edi
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK30-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK30-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK30-NEXT: orl %edi, %ecx
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK30-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK30-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK30-NEXT: orl %ebp, %esi
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %eax, %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK30-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK30-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK30-NEXT: addl %ebp, %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK30-NEXT: orl %eax, %ebx
+; FALLBACK30-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK30-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %edi, %edx
+; FALLBACK30-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK30-NEXT: movl %edx, 60(%eax)
-; FALLBACK30-NEXT: movl %ebx, 56(%eax)
-; FALLBACK30-NEXT: movl %edi, 48(%eax)
-; FALLBACK30-NEXT: movl %ecx, 52(%eax)
-; FALLBACK30-NEXT: movl %esi, 40(%eax)
+; FALLBACK30-NEXT: movl %edi, 60(%eax)
+; FALLBACK30-NEXT: movl %edx, 56(%eax)
+; FALLBACK30-NEXT: movl %ecx, 48(%eax)
+; FALLBACK30-NEXT: movl %esi, 52(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 40(%eax)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK30-NEXT: movl %ecx, 44(%eax)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
index 338e104..221a51e 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
@@ -712,33 +712,33 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%edi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -994,42 +994,42 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %al, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, 28(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, 28(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 12(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1297,33 +1297,33 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%edi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %ecx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1487,31 +1487,31 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rsi,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rsi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_32bytes:
@@ -1761,88 +1761,90 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %eax, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 20(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -2040,32 +2042,32 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %cl, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %sil, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rdi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rdi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -16(%rsp,%rdi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rdi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rdi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rsi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rsi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rdi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rdi, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
@@ -2319,97 +2321,101 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $28, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %cl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $28, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %dl, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%esi), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%esi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%esi), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%esi), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%esi), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, %edi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebp), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, 92(%esp,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, 92(%esp,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 24(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 20(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -2610,31 +2616,31 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rax, %rsi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rsi,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rsi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_32bytes:
@@ -2927,60 +2933,59 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 32(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %eax, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %ecx, %esi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 24(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%esi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -3263,13 +3268,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: lshr_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -3292,65 +3295,63 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r12,%r12), %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r13, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r12, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r15, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_64bytes:
@@ -3868,20 +3869,20 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -3906,116 +3907,117 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ecx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ecx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -4388,10 +4390,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: shl_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
@@ -4419,63 +4419,61 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: negl %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movslq %esi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi), %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r14, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r12
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %r13d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %r13b
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r10, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rbp
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r14, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r9, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rsi), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r15, %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r15, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r12, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, -8(%rsp,%rsi), %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rsi, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r12, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rbx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 32(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r14, 24(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r15, 40(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: shl_64bytes:
@@ -4972,33 +4970,33 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $204, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%ebp), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%ebp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%ebp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%ebp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%ebp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
@@ -5011,7 +5009,7 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -5032,149 +5030,152 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, (%esp), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edx), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edx), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, 188(%esp,%ecx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 56(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 60(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 52(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 40(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 44(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 32(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 36(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 24(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 20(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -5534,13 +5535,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: ashr_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -5567,65 +5566,63 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r12,%r12), %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r13, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r12, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r15, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_64bytes:
@@ -6221,33 +6218,31 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
@@ -6256,87 +6251,84 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %edx, %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index c3054a3..6b5c604 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -1635,22 +1635,22 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rcx,8), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rcx,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
@@ -1807,40 +1807,43 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%esi,4), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%esi,4), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1906,13 +1909,13 @@ define void @load_1byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movb %cl, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2070,13 +2073,13 @@ define void @load_2byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movw %cx, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2233,13 +2236,13 @@ define void @load_4byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movl %ecx, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2521,10 +2524,11 @@ define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
;
; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $140, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2541,25 +2545,26 @@ define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $dl killed $dl killed $edx def $edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, (%esp,%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $140, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -2667,21 +2672,21 @@ define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, -128(%rsp,%rsi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r10, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r9, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
@@ -2860,33 +2865,33 @@ define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, 16(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 8(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
@@ -3026,9 +3031,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
@@ -3043,38 +3046,36 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, -128(%rsp,%rsi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r9, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r10,%r10), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r10, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r10,%r10), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rbx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r10, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
@@ -3304,7 +3305,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $172, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
@@ -3320,59 +3321,60 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, 32(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 24(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 20(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
@@ -3380,7 +3382,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $172, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 7735500..bed8e58 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -1879,22 +1879,22 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rcx,8), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rcx,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
@@ -2055,40 +2055,43 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%esi,4), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%esi,4), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
index 4d261a9..9fbbba2 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -820,7 +820,7 @@ define void @infiniteloop() {
; ENABLE-NEXT: movq %rsp, %rax
; ENABLE-NEXT: addq $-16, %rax
; ENABLE-NEXT: movq %rax, %rsp
-; ENABLE-NEXT: xorl %ecx, %ecx
+; ENABLE-NEXT: xorl %ecx, %ecx
; ENABLE-NEXT: .p2align 4
; ENABLE-NEXT: LBB10_2: ## %for.body
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
@@ -851,8 +851,8 @@ define void @infiniteloop() {
; DISABLE-NEXT: ## %bb.1: ## %if.then
; DISABLE-NEXT: movq %rsp, %rax
; DISABLE-NEXT: addq $-16, %rax
-; DISABLE-NEXT: movq %rax, %rsp
-; DISABLE-NEXT: xorl %ecx, %ecx
+; DISABLE-NEXT: movq %rax, %rsp
+; DISABLE-NEXT: xorl %ecx, %ecx
; DISABLE-NEXT: .p2align 4
; DISABLE-NEXT: LBB10_2: ## %for.body
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
@@ -1185,10 +1185,10 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
; ENABLE-NEXT: .p2align 4
; ENABLE-NEXT: LBB14_2: ## %for.body
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
-; ENABLE-NEXT: cmpl %esi, %edi
-; ENABLE-NEXT: setl %al
+; ENABLE-NEXT: movl %esi, %eax
; ENABLE-NEXT: xorl %esi, %esi
-; ENABLE-NEXT: movb %al, %sil
+; ENABLE-NEXT: cmpl %eax, %edi
+; ENABLE-NEXT: setl %sil
; ENABLE-NEXT: incb %dl
; ENABLE-NEXT: cmpb $45, %dl
; ENABLE-NEXT: jl LBB14_2
@@ -1220,10 +1220,10 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
; DISABLE-NEXT: .p2align 4
; DISABLE-NEXT: LBB14_2: ## %for.body
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
-; DISABLE-NEXT: cmpl %esi, %edi
-; DISABLE-NEXT: setl %al
+; DISABLE-NEXT: movl %esi, %eax
; DISABLE-NEXT: xorl %esi, %esi
-; DISABLE-NEXT: movb %al, %sil
+; DISABLE-NEXT: cmpl %eax, %edi
+; DISABLE-NEXT: setl %sil
; DISABLE-NEXT: incb %dl
; DISABLE-NEXT: cmpb $45, %dl
; DISABLE-NEXT: jl LBB14_2
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 2bef668..59fbf71 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -62,12 +62,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB3_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notl %edx
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notl %ecx
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: jne .LBB3_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -78,12 +78,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB3_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notl %ecx
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notl %esi
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
; X64-LIN-NEXT: jne .LBB3_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: retq
@@ -94,12 +94,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB3_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notl %ecx
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notl %edx
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
; X64-WIN-NEXT: jne .LBB3_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
@@ -126,13 +126,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB4_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notl %edx
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: testw %dx, %dx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: xorl %edx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notl %ecx
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
+; X86-NEXT: testw %cx, %cx
; X86-NEXT: jne .LBB4_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: # kill: def $ax killed $ax killed $eax
@@ -144,13 +144,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB4_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notl %ecx
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: testw %cx, %cx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %esi, %ecx
+; X64-LIN-NEXT: xorl %ecx, %eax
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notl %esi
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
+; X64-LIN-NEXT: testw %si, %si
; X64-LIN-NEXT: jne .LBB4_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: # kill: def $ax killed $ax killed $eax
@@ -163,13 +163,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB4_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notl %ecx
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: testw %cx, %cx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %edx, %ecx
+; X64-WIN-NEXT: xorl %ecx, %eax
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notl %edx
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
+; X64-WIN-NEXT: testw %dx, %dx
; X64-WIN-NEXT: jne .LBB4_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: # kill: def $ax killed $ax killed $eax
@@ -197,12 +197,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB5_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorb %cl, %al
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notb %dl
-; X86-NEXT: andb %cl, %dl
-; X86-NEXT: addb %dl, %dl
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notb %cl
+; X86-NEXT: andb %dl, %cl
+; X86-NEXT: addb %cl, %cl
; X86-NEXT: jne .LBB5_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -213,12 +213,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB5_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorb %sil, %al
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notb %cl
-; X64-LIN-NEXT: andb %sil, %cl
-; X64-LIN-NEXT: addb %cl, %cl
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notb %sil
+; X64-LIN-NEXT: andb %cl, %sil
+; X64-LIN-NEXT: addb %sil, %sil
; X64-LIN-NEXT: jne .LBB5_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: # kill: def $al killed $al killed $eax
@@ -230,12 +230,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB5_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorb %dl, %al
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notb %cl
-; X64-WIN-NEXT: andb %dl, %cl
-; X64-WIN-NEXT: addb %cl, %cl
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notb %dl
+; X64-WIN-NEXT: andb %cl, %dl
+; X64-WIN-NEXT: addb %dl, %dl
; X64-WIN-NEXT: jne .LBB5_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
@@ -262,12 +262,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB6_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: xorl $2147483646, %edx # imm = 0x7FFFFFFE
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: jne .LBB6_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -278,12 +278,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB6_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: xorl $2147483646, %esi # imm = 0x7FFFFFFE
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
; X64-LIN-NEXT: jne .LBB6_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: retq
@@ -294,12 +294,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB6_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: xorl $2147483646, %edx # imm = 0x7FFFFFFE
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
; X64-WIN-NEXT: jne .LBB6_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
index c393d3e..3f6d8fe 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
@@ -34,3 +34,83 @@ v_cvt_f32_bf16 v5, v1 div:2
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1250-ERR-NEXT:{{^}}v_cvt_f32_bf16 v5, v1 div:2
// GFX1250-ERR-NEXT:{{^}} ^
+
+v_cos_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_cos_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_cos_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_cos_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_exp_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_exp_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_exp_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_exp_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_log_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_log_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_log_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_log_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rcp_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_rcp_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rcp_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_rcp_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rsq_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_rsq_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rsq_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_rsq_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sin_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_sin_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sin_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_sin_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sqrt_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_sqrt_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sqrt_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_sqrt_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_tanh_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_tanh_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_tanh_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_tanh_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
index 0931523..37ad6eb 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
@@ -3781,15 +3781,6 @@ v_tanh_bf16_e64 v5, null
v_tanh_bf16_e64 v5, -1
// GFX1250: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
-v_tanh_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
-v_tanh_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_prng_b32_e64 v5, v1
// GFX1250: v_prng_b32_e64 v5, v1 ; encoding: [0x05,0x00,0xcb,0xd5,0x01,0x01,0x00,0x00]
@@ -3862,15 +3853,6 @@ v_rcp_bf16_e64 v5, null
v_rcp_bf16_e64 v5, -1
// GFX1250: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rcp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rcp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sqrt_bf16_e64 v5, v1
// GFX1250: v_sqrt_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00]
@@ -3907,15 +3889,6 @@ v_sqrt_bf16_e64 v5, null
v_sqrt_bf16_e64 v5, -1
// GFX1250: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sqrt_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rsq_bf16_e64 v5, v1
// GFX1250: v_rsq_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00]
@@ -3952,15 +3925,6 @@ v_rsq_bf16_e64 v5, null
v_rsq_bf16_e64 v5, -1
// GFX1250: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rsq_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rsq_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_log_bf16_e64 v5, v1
// GFX1250: v_log_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00]
@@ -3997,15 +3961,6 @@ v_log_bf16_e64 v5, null
v_log_bf16_e64 v5, -1
// GFX1250: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-v_log_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
-v_log_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
-v_log_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_exp_bf16_e64 v5, v1
// GFX1250: v_exp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00]
@@ -4042,15 +3997,6 @@ v_exp_bf16_e64 v5, null
v_exp_bf16_e64 v5, -1
// GFX1250: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
-v_exp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
-v_exp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sin_bf16_e64 v5, v1
// GFX1250: v_sin_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00]
@@ -4087,15 +4033,6 @@ v_sin_bf16_e64 v5, null
v_sin_bf16_e64 v5, -1
// GFX1250: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sin_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sin_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cos_bf16_e64 v5, v1
// GFX1250: v_cos_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00]
@@ -4132,15 +4069,6 @@ v_cos_bf16_e64 v5, null
v_cos_bf16_e64 v5, -1
// GFX1250: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
-v_cos_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
-v_cos_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cvt_f32_bf16_e64 v5, v1
// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
index 5ac9eb4..52f9ba3 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
@@ -3952,15 +3952,6 @@ v_tanh_bf16_e64 v5.l, null
v_tanh_bf16_e64 v5.l, -1
// GFX1250: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
-v_tanh_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
-v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_tanh_bf16 v5.l, v128.h
// GFX1250: v_tanh_bf16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0xca,0xd5,0x80,0x01,0x00,0x00]
@@ -4036,15 +4027,6 @@ v_rcp_bf16_e64 v5.l, null
v_rcp_bf16_e64 v5.l, -1
// GFX1250: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rcp_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rcp_bf16 v5.h, v128.h
// GFX1250: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00]
@@ -4084,15 +4066,6 @@ v_sqrt_bf16_e64 v5.l, null
v_sqrt_bf16_e64 v5.l, -1
// GFX1250: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sqrt_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sqrt_bf16 v5.h, v128.h
// GFX1250: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00]
@@ -4132,15 +4105,6 @@ v_rsq_bf16_e64 v5.l, null
v_rsq_bf16_e64 v5.l, -1
// GFX1250: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rsq_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rsq_bf16 v5.h, v128.h
// GFX1250: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00]
@@ -4180,15 +4144,6 @@ v_log_bf16_e64 v5.l, null
v_log_bf16_e64 v5.l, -1
// GFX1250: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-v_log_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
-v_log_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
-v_log_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_log_bf16 v5.h, v128.h
// GFX1250: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00]
@@ -4228,15 +4183,6 @@ v_exp_bf16_e64 v5.l, null
v_exp_bf16_e64 v5.l, -1
// GFX1250: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
-v_exp_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
-v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_exp_bf16 v5.h, v128.h
// GFX1250: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00]
@@ -4276,15 +4222,6 @@ v_sin_bf16_e64 v5.l, null
v_sin_bf16_e64 v5.l, -1
// GFX1250: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sin_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sin_bf16 v5.h, v128.h
// GFX1250: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00]
@@ -4324,15 +4261,6 @@ v_cos_bf16_e64 v5.l, null
v_cos_bf16_e64 v5.l, -1
// GFX1250: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
-v_cos_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
-v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cos_bf16_e64 v5.h, v128.h
// GFX1250: v_cos_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xff,0xd5,0x80,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
index b21fca6..21077fe 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
@@ -158,18 +158,6 @@ v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_prng_b32_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_prng_b32_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -258,18 +246,6 @@ v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -314,18 +290,6 @@ v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -370,18 +334,6 @@ v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -426,18 +378,6 @@ v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -482,18 +422,6 @@ v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -538,18 +466,6 @@ v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -594,18 +510,6 @@ v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
index d163856..646acf5 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
@@ -162,18 +162,6 @@ v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_tanh_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xca,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -266,18 +254,6 @@ v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rcp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -326,18 +302,6 @@ v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -386,18 +350,6 @@ v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -446,18 +398,6 @@ v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -506,18 +446,6 @@ v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -566,18 +494,6 @@ v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -626,18 +542,6 @@ v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xff,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
index 78afa10b..1907a93 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
@@ -38,18 +38,6 @@ v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -58,114 +46,30 @@ v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
index 6ec4d5f..35a51db 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
@@ -42,18 +42,6 @@ v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_tanh_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -66,18 +54,6 @@ v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rcp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -86,18 +62,6 @@ v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -106,18 +70,6 @@ v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -126,18 +78,6 @@ v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -146,18 +86,6 @@ v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -166,18 +94,6 @@ v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -186,18 +102,6 @@ v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
index 67747a6..0b39397 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
@@ -4123,18 +4123,10 @@
# GFX1250-REAL16: v_tanh_f16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0x9f,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_tanh_f16_e64 v5, v128 ; encoding: [0x05,0x00,0x9f,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
@@ -4159,10 +4151,6 @@
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
@@ -4223,18 +4211,10 @@
0x05,0x00,0xcb,0xd5,0x6a,0x00,0x00,0x00
# GFX1250: v_prng_b32_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xcb,0xd5,0x6a,0x00,0x00,0x00]
-0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
@@ -4259,10 +4239,6 @@
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
@@ -4287,18 +4263,10 @@
# GFX1250-REAL16: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xf9,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
@@ -4323,10 +4291,6 @@
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
@@ -4351,18 +4315,10 @@
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfa,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
@@ -4387,10 +4343,6 @@
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
@@ -4415,18 +4367,10 @@
# GFX1250-REAL16: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfb,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
@@ -4451,10 +4395,6 @@
# GFX1250-REAL16: v_log_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
@@ -4479,18 +4419,10 @@
# GFX1250-REAL16: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfc,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
@@ -4515,10 +4447,6 @@
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
@@ -4543,18 +4471,10 @@
# GFX1250-REAL16: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfd,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
@@ -4579,10 +4499,6 @@
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
@@ -4607,18 +4523,10 @@
# GFX1250-REAL16: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfe,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
@@ -4643,10 +4551,6 @@
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
index 7c29f8a..8b26d2a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
@@ -104,18 +104,6 @@
# GFX1250-REAL16: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_tanh_f16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -197,18 +185,6 @@
0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff
# GFX1250: v_prng_b32_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
-0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -257,18 +233,6 @@
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -317,18 +281,6 @@
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -377,18 +329,6 @@
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -437,18 +377,6 @@
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -497,18 +425,6 @@
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -557,18 +473,6 @@
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
index d26bc46..15f76c5 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
@@ -34,22 +34,10 @@
# GFX1250-REAL16: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_f16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
@@ -57,142 +45,58 @@
0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
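The dpp8 forms in this file are all 12-byte VOP3-from-VOP1 encodings: the eight 3-bit lane selectors are packed LSB-first into the last three bytes, and byte 4 is 0xe9 for plain DPP8 versus 0xea when the disassembly prints fi:1. A minimal sketch of the selector decode (Python; the helper name is hypothetical and the field layout is inferred from the encodings checked above):

```python
# Hypothetical helper, not part of LLVM: decode the dpp8 lane selectors
# from the last three bytes of a 12-byte VOP3-from-VOP1 DPP8 encoding.
def decode_dpp8(enc: list[int]) -> list[int]:
    assert len(enc) == 12, "VOP3-from-VOP1 DPP8 encodings here are 12 bytes"
    word = enc[9] | (enc[10] << 8) | (enc[11] << 16)  # 24-bit selector field
    return [(word >> (3 * lane)) & 0x7 for lane in range(8)]

# The common operand bytes 0x77,0x39,0x05 give [7, 6, 5, 4, 3, 2, 1, 0],
# matching the dpp8:[7,6,5,4,3,2,1,0] printed in the checks; 0x00,0x00,0x00
# likewise gives dpp8:[0,0,0,0,0,0,0,0].
print(decode_dpp8([0x05, 0x00, 0xca, 0xd5, 0xe9, 0x00, 0x00, 0x00,
                   0x01, 0x77, 0x39, 0x05]))
```
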
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index 9003072..dd347a7 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -19,9 +19,8 @@ define void @mulvl123_addressing(ptr %src, ptr %dst, i64 %count) #0 {
; COMMON-NEXT: ldr z3, [x0, #3, mul vl]
; COMMON-NEXT: addvl x0, x0, #5
; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; COMMON-NEXT: movprfx z1, z2
-; COMMON-NEXT: umax z1.b, p0/m, z1.b, z3.b
-; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
+; COMMON-NEXT: umax z2.b, p0/m, z2.b, z3.b
+; COMMON-NEXT: umax z0.b, p0/m, z0.b, z2.b
; COMMON-NEXT: st1b { z0.b }, p0, [x1, x8]
; COMMON-NEXT: incb x8
; COMMON-NEXT: cmp x8, x2
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index c12d813..082b876 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -234,16 +234,17 @@ define void @extrastride(ptr nocapture %main, i32 %main_stride, ptr nocapture %r
; X32-NEXT: .p2align 4
; X32-NEXT: .LBB2_2: # %for.body
; X32-NEXT: # =>This Inner Loop Header: Depth=1
-; X32-NEXT: movl (%ebx,%esi), %ebp
-; X32-NEXT: addl (%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: movl %ebp, (%edx)
-; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, %ebp
+; X32-NEXT: movl (%ebx,%esi), %ebx
+; X32-NEXT: addl (%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: movl %ebx, (%edx)
+; X32-NEXT: leal (%ebp,%esi), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: addl %ecx, %edx
; X32-NEXT: decl %eax
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 8d878f4..2f7e356 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -429,48 +429,36 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
; DEFAULT: [[VECTOR_BODY]]:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE37:.*]] ]
-; DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP19:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]]
-; DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = icmp ugt <4 x i32> [[BROADCAST_SPLAT31]], [[TMP6]]
+; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE33:.*]] ]
+; DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
+; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
+; DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
+; DEFAULT-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP5]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP7]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[D]], i64 [[INDEX]]
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
; DEFAULT: [[PRED_STORE_IF]]:
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP11]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE]]
; DEFAULT: [[PRED_STORE_CONTINUE]]:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF28:.*]], label %[[PRED_STORE_CONTINUE29:.*]]
+; DEFAULT: [[PRED_STORE_IF28]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE29]]
+; DEFAULT: [[PRED_STORE_CONTINUE29]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF30:.*]], label %[[PRED_STORE_CONTINUE31:.*]]
+; DEFAULT: [[PRED_STORE_IF30]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE31]]
+; DEFAULT: [[PRED_STORE_CONTINUE31]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33]]
; DEFAULT: [[PRED_STORE_IF32]]:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP13]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE33]]
; DEFAULT: [[PRED_STORE_CONTINUE33]]:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF34:.*]], label %[[PRED_STORE_CONTINUE35:.*]]
-; DEFAULT: [[PRED_STORE_IF34]]:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP15]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_CONTINUE35]]:
-; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP21]], label %[[PRED_STORE_IF36:.*]], label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_IF36]]:
-; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_CONTINUE37]]:
-; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[TMP8]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
+; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
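In the updated DEFAULT checks the store mask is built entirely from loop-invariant loads, so every lane of the splatted <4 x i1> carries the same bit and one scalar icmp/br can guard all four replicated stores. A small numeric sketch of that invariant (Python with NumPy; the values are made-up stand-ins for the loads of A, B and C):

```python
import numpy as np

# Uniform operands: or/icmp per lane on broadcasts equals one scalar
# or/icmp broadcast afterwards, so the four lane extracts collapse to
# a single condition.
a, b, c = 3, 4, 9
lanewise = np.full(4, c) > (np.full(4, b) | np.full(4, a))
scalar = c > (b | a)
assert (lanewise == np.full(4, scalar)).all()
```
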
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
index 5970608..bea34e2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
@@ -16,7 +16,7 @@
; CM: vector.ph:
; CM: CLONE ir<%a> = extractvalue ir<%sv>
; CM: CLONE ir<%b> = extractvalue ir<%sv>
-; CM: WIDEN ir<%add> = add ir<%a>, ir<%b>
+; CM: CLONE ir<%add> = add ir<%a>, ir<%b>
; CM: Successor(s): vector loop
; CM: LV: Scalar loop costs: 5.
@@ -30,23 +30,22 @@ define void @test1(ptr %dst, {i64, i64} %sv) {
; FORCED-NEXT: br label %[[VECTOR_PH:.*]]
; FORCED: [[VECTOR_PH]]:
; FORCED-NEXT: [[TMP0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
-; FORCED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
; FORCED-NEXT: [[TMP4:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i64 0
+; FORCED-NEXT: [[TMP5:%.*]] = add i64 [[TMP0]], [[TMP4]]
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
; FORCED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[TMP1:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
; FORCED-NEXT: br label %[[VECTOR_BODY:.*]]
; FORCED: [[VECTOR_BODY]]:
; FORCED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; FORCED-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; FORCED-NEXT: store <2 x i64> [[TMP1]], ptr [[TMP2]], align 4
+; FORCED-NEXT: store <2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP2]], align 4
; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
;
entry:
br label %loop.body
@@ -99,10 +98,11 @@ define void @test_getVectorCallCost(ptr %dst, {float, float} %sv) {
; FORCED-NEXT: store <2 x float> [[TMP2]], ptr [[TMP1]], align 4
; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
;
entry:
br label %loop.body
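The FORCED checks now add the two extractvalue results as scalars and broadcast the sum once, instead of broadcasting each operand and adding vectors; for loop-invariant operands the two orders produce identical lanes, which is what lets the recipe become CLONE rather than WIDEN. A throwaway sketch of that equivalence (Python with NumPy; the constants are arbitrary):

```python
import numpy as np

# Broadcasting after the add gives the same lanes as adding two
# broadcasts when both inputs are loop-invariant scalars.
a, b = 7, 35  # stand-ins for the two extractvalue results
widened = np.full(2, a, dtype=np.int64) + np.full(2, b, dtype=np.int64)
cloned = np.full(2, a + b, dtype=np.int64)
assert (widened == cloned).all()
```
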
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
index 0c6a490..eceda08 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
@@ -17,17 +17,15 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]]
; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[EXTRACT0]], [[TMP10]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT2]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
+; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP8]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index f25b86d..b81637f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -293,9 +293,9 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[A]], -1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
@@ -309,7 +309,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP19]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
index 8a57973..372876c 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
@@ -134,22 +134,18 @@ define i16 @for_phi_removed(ptr %src) {
; UNROLL-NO-IC: [[VECTOR_BODY]]:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 104
; UNROLL-NO-IC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL-NO-IC: [[MIDDLE_BLOCK]]:
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; UNROLL-NO-IC-NEXT: br label %[[SCALAR_PH:.*]]
; UNROLL-NO-IC: [[SCALAR_PH]]:
; UNROLL-NO-IC-NEXT: br label %[[LOOP:.*]]
; UNROLL-NO-IC: [[LOOP]]:
; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 104, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; UNROLL-NO-IC-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
@@ -200,22 +196,18 @@ define i16 @for_phi_removed(ptr %src) {
; SINK-AFTER: [[VECTOR_BODY]]:
; SINK-AFTER-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SINK-AFTER-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; SINK-AFTER-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; SINK-AFTER-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; SINK-AFTER-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; SINK-AFTER-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; SINK-AFTER-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 108
; SINK-AFTER-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SINK-AFTER: [[MIDDLE_BLOCK]]:
-; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; SINK-AFTER-NEXT: br label %[[SCALAR_PH:.*]]
; SINK-AFTER: [[SCALAR_PH]]:
; SINK-AFTER-NEXT: br label %[[LOOP:.*]]
; SINK-AFTER: [[LOOP]]:
; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 108, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; SINK-AFTER-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; SINK-AFTER-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
index 7b0c366..440309d 100644
--- a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
@@ -153,3 +153,79 @@ loop:
exit:
ret void
}
+
+define void @narrow_widen_store_user(i32 %x, ptr noalias %A, ptr noalias %B) {
+; VF4IC1-LABEL: define void @narrow_widen_store_user(
+; VF4IC1-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF4IC1-NEXT: [[ENTRY:.*:]]
+; VF4IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF4IC1: [[VECTOR_PH]]:
+; VF4IC1-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
+; VF4IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: [[TMP5:%.*]] = mul i32 [[TMP0]], 3
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
+; VF4IC1-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF4IC1: [[VECTOR_BODY]]:
+; VF4IC1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF4IC1-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF4IC1-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF4IC1-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF4IC1-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF4IC1-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF4IC1: [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT: br label %[[EXIT:.*]]
+; VF4IC1: [[EXIT]]:
+; VF4IC1-NEXT: ret void
+;
+; VF2IC2-LABEL: define void @narrow_widen_store_user(
+; VF2IC2-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: [[TMP7:%.*]] = mul i32 [[TMP0]], 3
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i64 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT1]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP2]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 4
+; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP5]], align 4
+; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF2IC2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2IC2: [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT: br label %[[EXIT:.*]]
+; VF2IC2: [[EXIT]]:
+; VF2IC2-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.A = getelementptr i32, ptr %A, i32 %iv
+ %gep.B = getelementptr i32, ptr %B, i32 %iv
+ %wide.add = add i32 %x, 1
+ %wide.mul = mul i32 %wide.add, 3
+ store i32 %wide.add, ptr %gep.A
+ store i32 %wide.mul, ptr %gep.B
+ %iv.next = add i32 %iv, 1
+ %ec = icmp ne i32 %iv.next, 1024
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
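A scalar model of the new test may help: both stored values are uniform across iterations, so the vector loop only needs one add, one mul, and a splat of each result (Python; a direct transliteration of the IR above):

```python
# Scalar model of @narrow_widen_store_user: x+1 and (x+1)*3 are computed
# once and every element of A and B receives the corresponding value.
def narrow_widen_store_user(x: int, A: list[int], B: list[int]) -> None:
    wide_add = x + 1
    wide_mul = wide_add * 3
    for i in range(1024):
        A[i] = wide_add
        B[i] = wide_mul
```
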
diff --git a/llvm/test/Transforms/LoopVectorize/pr50686.ll b/llvm/test/Transforms/LoopVectorize/pr50686.ll
index 878fbec..be9110c 100644
--- a/llvm/test/Transforms/LoopVectorize/pr50686.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr50686.ll
@@ -18,20 +18,16 @@ define void @m(ptr nocapture %p, ptr nocapture %p2, i32 %q) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = sub nsw <4 x i32> zeroinitializer, [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP2]], [[BROADCAST_SPLAT3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i32 0, [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub nsw i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP4]], [[BROADCAST_SPLAT5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT5]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 60
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
new file mode 100644
index 0000000..1d9cf6a
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
@@ -0,0 +1,21 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=ArmPL < %s | FileCheck %s
+
+; CHECK: declare void @armpl_svsincos_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @armpl_svsincos_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
new file mode 100644
index 0000000..2c69007
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
@@ -0,0 +1,20 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=sleefgnuabi < %s | FileCheck %s
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincos(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincospi(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincosf(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincospif(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincosf(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincospif(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincos(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincospi(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
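The SLEEF symbols checked above follow the AArch64 vector-function-ABI mangling, an assumption worth stating: _ZGV, then ISA ('n' AdvSIMD, 's' SVE), mask ('N' unmasked), lane count or 'x' for scalable, one token per parameter ('v' vector, 'l<n>' linear with an n-byte stride), then the scalar name. A rough parser sketch (Python; the regex and field names are illustrative only):

```python
import re

# Rough split of a _ZGV-mangled vector math symbol into its VFABI fields.
def parse_vfabi(name: str):
    m = re.match(r"_ZGV(?P<isa>[ns])(?P<mask>[MN])(?P<len>\d+|x)"
                 r"(?P<params>(?:v|l\d+)+)_(?P<scalar>\w+)", name)
    return m.groupdict() if m else None

# {'isa': 'n', 'mask': 'N', 'len': '2', 'params': 'vl8l8', 'scalar': 'sincos'}
# i.e. an unmasked 2-lane AdvSIMD sincos taking one vector argument and two
# result pointers that advance 8 bytes per lane -- consistent with the
# (<2 x double>, ptr, ptr) declaration checked above.
print(parse_vfabi("_ZGVnN2vl8l8_sincos"))
```
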
diff --git a/llvm/test/tools/llc/save-stats.ll b/llvm/test/tools/llc/save-stats.ll
index 4950625..a5769f8 100644
--- a/llvm/test/tools/llc/save-stats.ll
+++ b/llvm/test/tools/llc/save-stats.ll
@@ -1,5 +1,6 @@
; REQUIRES: asserts
+; RUN: rm -rf %t.dir && mkdir -p %t.dir && cd %t.dir
; RUN: llc --save-stats=obj -o %t.s %s && cat %t.stats | FileCheck %s
; RUN: llc --save-stats=cwd -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s
; RUN: llc --save-stats -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s