Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir | 3
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir | 9
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir | 3
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll | 204
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vabs.ll | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vadd.ll | 341
-rw-r--r--  llvm/test/CodeGen/AArch64/combine-sdiv.ll | 137
-rw-r--r--  llvm/test/CodeGen/AArch64/extract-vector-elt.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/fcmp.ll | 18
-rw-r--r--  llvm/test/CodeGen/AArch64/fpclamptosat.ll | 55
-rw-r--r--  llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll | 101
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-compare-instructions.ll | 113
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-shift-left-long.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/select_cc.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-hazard.ll | 272
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll | 131
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll | 231
-rw-r--r--  llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll | 410
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 34
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/calling-conventions.ll | 202
-rw-r--r--  llvm/test/CodeGen/AMDGPU/carryout-selection.ll | 166
-rw-r--r--  llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir | 52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ds_write2.ll | 19
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll | 1081
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmax3.ll | 228
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin3.ll | 300
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll | 151
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll | 64
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll | 28
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll | 54
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mai-hazards.mir | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/max.ll | 55
-rw-r--r--  llvm/test/CodeGen/AMDGPU/min.ll | 129
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul.ll | 81
-rw-r--r--  llvm/test/CodeGen/AMDGPU/packed-fp32.ll | 444
-rw-r--r--  llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/preload-kernargs.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/true16-fold.mir | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wait-xcnt.mir | 9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll | 72
-rw-r--r--  llvm/test/CodeGen/ARM/and-mask-variable.ll | 90
-rw-r--r--  llvm/test/CodeGen/ARM/extract-bits.ll | 4591
-rw-r--r--  llvm/test/CodeGen/ARM/extract-lowbits.ll | 2752
-rw-r--r--  llvm/test/CodeGen/ARM/fpclamptosat.ll | 48
-rw-r--r--  llvm/test/CodeGen/ARM/fpclamptosat_vec.ll | 107
-rw-r--r--  llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll | 93
-rw-r--r--  llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll | 12
-rw-r--r--  llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll | 42
-rw-r--r--  llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll | 25
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll | 58
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll | 59
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll | 25
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir | 5
-rw-r--r--  llvm/test/CodeGen/RISCV/double-intrinsics.ll | 82
-rw-r--r--  llvm/test/CodeGen/RISCV/float-intrinsics.ll | 81
-rw-r--r--  llvm/test/CodeGen/RISCV/fp128.ll | 43
-rw-r--r--  llvm/test/CodeGen/RISCV/fpclamptosat.ll | 58
-rw-r--r--  llvm/test/CodeGen/RISCV/half-intrinsics.ll | 158
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll | 202
-rw-r--r--  llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll | 25
-rw-r--r--  llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll | 28
-rw-r--r--  llvm/test/CodeGen/WebAssembly/fpclamptosat.ll | 89
-rw-r--r--  llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll | 78
-rw-r--r--  llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/avx2-vector-shifts.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/fpclamptosat.ll | 45
-rw-r--r--  llvm/test/CodeGen/X86/fpclamptosat_vec.ll | 105
-rw-r--r--  llvm/test/CodeGen/X86/gfni-shifts.ll | 168
-rw-r--r--  llvm/test/CodeGen/X86/isel-fpclass.ll | 256
-rw-r--r--  llvm/test/CodeGen/X86/isel-smax.ll | 244
-rw-r--r--  llvm/test/CodeGen/X86/isel-smin.ll | 244
-rw-r--r--  llvm/test/CodeGen/X86/isel-umax.ll | 244
-rw-r--r--  llvm/test/CodeGen/X86/isel-umin.ll | 244
-rw-r--r--  llvm/test/CodeGen/X86/logic-shift.ll | 36
-rw-r--r--  llvm/test/CodeGen/X86/pr161693.ll | 40
-rw-r--r--  llvm/test/CodeGen/X86/prefer-avx256-shift.ll | 36
-rw-r--r--  llvm/test/CodeGen/X86/shuffle-as-shifts.ll | 54
-rw-r--r--  llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/sshl_sat_vec.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/vec-strict-cmp-128.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshl-sub128.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshr-128.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshr-256.ll | 34
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-fshr-sub128.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/vector-sext.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-128.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-256.ll | 164
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-512.ll | 36
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll | 68
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-128.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-256.ll | 68
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/vector-zext.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll | 8
127 files changed, 12739 insertions(+), 4057 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index 7872c02..461a7ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -177,7 +177,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
; GISEL-NEXT: neg v2.16b, v3.16b
; GISEL-NEXT: shl v3.16b, v4.16b, #7
; GISEL-NEXT: ushl v1.16b, v1.16b, v2.16b
-; GISEL-NEXT: sshr v2.16b, v3.16b, #7
+; GISEL-NEXT: cmlt v2.16b, v3.16b, #0
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%div = udiv <16 x i8> %x, <i8 -64, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -229,7 +229,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
; GISEL-NEXT: neg v2.8h, v4.8h
; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: sshr v2.8h, v3.8h, #15
+; GISEL-NEXT: cmlt v2.8h, v3.8h, #0
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
index 0b950b7..76d4d29 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-sextinreg.mir
@@ -14,8 +14,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL %v1, [[DUP]](<4 x s32>)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: %sext:_(<4 x s32>) = G_VASHR [[SHL]], [[C1]](s32)
+ ; CHECK-NEXT: %sext:_(<4 x s32>) = G_VASHR [[SHL]], 16
; CHECK-NEXT: $q0 = COPY %sext(<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%v1:_(<4 x s32>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
index b3fb5a4..dfaddba 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
@@ -15,8 +15,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VASHR]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<4 x s32>) = COPY $q0
@@ -39,8 +38,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VLSHR]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<4 x s32>) = COPY $q0
@@ -63,8 +61,7 @@ body: |
; CHECK: liveins: $d0, $d1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], 5
; CHECK-NEXT: $q0 = COPY [[VLSHR]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
index c38e4a8..cf227cb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
@@ -29,7 +29,6 @@ body: |
; CHECK-NEXT: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12
; CHECK-NEXT: $d1 = COPY [[UCVTFd]]
%0(s64) = COPY $d0
- %1(s32) = G_CONSTANT i32 12
- %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, %1
+ %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, 12
$d1 = COPY %2(s64)
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
index 0706115..9fa6326 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
@@ -499,8 +499,7 @@ body: |
; CHECK-NEXT: $d0 = COPY [[SSHRv4i16_shift]]
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
- %1:gpr(s32) = G_CONSTANT i32 5
- %2:fpr(<4 x s16>) = G_VASHR %0, %1
+ %2:fpr(<4 x s16>) = G_VASHR %0, 5
$d0 = COPY %2(<4 x s16>)
RET_ReallyLR implicit $d0
...
@@ -520,8 +519,7 @@ body: |
; CHECK-NEXT: $d0 = COPY [[USHRv4i16_shift]]
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
- %1:gpr(s32) = G_CONSTANT i32 5
- %2:fpr(<4 x s16>) = G_VLSHR %0, %1
+ %2:fpr(<4 x s16>) = G_VLSHR %0, 5
$d0 = COPY %2(<4 x s16>)
RET_ReallyLR implicit $d0
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index cdde110..63c08dd 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -902,7 +902,7 @@ define void @sink_v8z16_0(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
; CHECK-GI-NEXT: subs x2, x2, #8
; CHECK-GI-NEXT: add x8, x8, #8
; CHECK-GI-NEXT: umull v1.8h, v1.8b, v0.8b
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
; CHECK-GI-NEXT: xtn v1.8b, v1.8h
; CHECK-GI-NEXT: str d1, [x0], #32
; CHECK-GI-NEXT: b.ne .LBB8_1
@@ -967,8 +967,8 @@ define void @sink_v16s16_8(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
; CHECK-GI-NEXT: mov d2, v1.d[1]
; CHECK-GI-NEXT: smull v1.8h, v1.8b, v0.8b
; CHECK-GI-NEXT: smull v2.8h, v2.8b, v0.8b
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
-; CHECK-GI-NEXT: sshr v2.8h, v2.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
+; CHECK-GI-NEXT: cmlt v2.8h, v2.8h, #0
; CHECK-GI-NEXT: uzp1 v1.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: str q1, [x0], #32
; CHECK-GI-NEXT: b.ne .LBB9_1
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index 9bafc5b..2a8b3ce2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -999,16 +999,10 @@ entry:
}
define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <8 x i16> %a, %b
%vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1017,16 +1011,10 @@ entry:
}
define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <4 x i32> %a, %b
%vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1035,16 +1023,10 @@ entry:
}
define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vaddhn_s64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_s64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <2 x i64> %a, %b
%vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
@@ -1053,16 +1035,10 @@ entry:
}
define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <8 x i16> %a, %b
%vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1071,16 +1047,10 @@ entry:
}
define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <4 x i32> %a, %b
%vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1089,16 +1059,10 @@ entry:
}
define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vaddhn_u64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vaddhn_u64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vaddhn_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vaddhn.i = add <2 x i64> %a, %b
%vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
@@ -1115,9 +1079,8 @@ define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: addhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1141,9 +1104,8 @@ define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1167,9 +1129,8 @@ define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_s64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: addhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1193,9 +1154,8 @@ define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: addhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1219,9 +1179,8 @@ define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1245,9 +1204,8 @@ define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vaddhn_high_u64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: addhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1461,16 +1419,10 @@ entry:
}
define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <8 x i16> %a, %b
%vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1479,16 +1431,10 @@ entry:
}
define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <4 x i32> %a, %b
%vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1497,16 +1443,10 @@ entry:
}
define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vsubhn_s64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_s64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <2 x i64> %a, %b
%vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
@@ -1515,16 +1455,10 @@ entry:
}
define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <8 x i16> %a, %b
%vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -1533,16 +1467,10 @@ entry:
}
define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u32:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u32:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <4 x i32> %a, %b
%vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
@@ -1551,16 +1479,10 @@ entry:
}
define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-SD-LABEL: test_vsubhn_u64:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: test_vsubhn_u64:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: test_vsubhn_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
entry:
%vsubhn.i = sub <2 x i64> %a, %b
%vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
@@ -1577,9 +1499,8 @@ define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: subhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1603,9 +1524,8 @@ define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: subhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1629,9 +1549,8 @@ define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_s64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: subhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1655,9 +1574,8 @@ define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: subhn v1.8b, v1.8h, v2.8h
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.8b, v1.8h, #8
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1681,9 +1599,8 @@ define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: subhn v1.4h, v1.4s, v2.4s
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.4h, v1.4s, #16
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
@@ -1707,9 +1624,8 @@ define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b)
;
; CHECK-GI-LABEL: test_vsubhn_high_u64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
+; CHECK-GI-NEXT: subhn v1.2s, v1.2d, v2.2d
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: shrn v1.2s, v1.2d, #32
; CHECK-GI-NEXT: fmov x8, d1
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
index 84879d1..03e6ca1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -524,8 +524,8 @@ define <32 x i8> @sext_v32i1(<32 x i1> %arg) {
; CHECK-GI-NEXT: mov.b v1[15], w9
; CHECK-GI-NEXT: shl.16b v0, v0, #7
; CHECK-GI-NEXT: shl.16b v1, v1, #7
-; CHECK-GI-NEXT: sshr.16b v0, v0, #7
-; CHECK-GI-NEXT: sshr.16b v1, v1, #7
+; CHECK-GI-NEXT: cmlt.16b v0, v0, #0
+; CHECK-GI-NEXT: cmlt.16b v1, v1, #0
; CHECK-GI-NEXT: ret
%res = sext <32 x i1> %arg to <32 x i8>
ret <32 x i8> %res
@@ -934,10 +934,10 @@ define <64 x i8> @sext_v64i1(<64 x i1> %arg) {
; CHECK-GI-NEXT: shl.16b v1, v1, #7
; CHECK-GI-NEXT: shl.16b v2, v2, #7
; CHECK-GI-NEXT: shl.16b v3, v3, #7
-; CHECK-GI-NEXT: sshr.16b v0, v0, #7
-; CHECK-GI-NEXT: sshr.16b v1, v1, #7
-; CHECK-GI-NEXT: sshr.16b v2, v2, #7
-; CHECK-GI-NEXT: sshr.16b v3, v3, #7
+; CHECK-GI-NEXT: cmlt.16b v0, v0, #0
+; CHECK-GI-NEXT: cmlt.16b v1, v1, #0
+; CHECK-GI-NEXT: cmlt.16b v2, v2, #0
+; CHECK-GI-NEXT: cmlt.16b v3, v3, #0
; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
%res = sext <64 x i1> %arg to <64 x i8>
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index c408d7f..a3f4722 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1914,21 +1914,13 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
}
define <8 x i16> @pr88784(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
-; CHECK-SD-LABEL: pr88784:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: usubl.8h v0, v0, v1
-; CHECK-SD-NEXT: cmlt.8h v1, v2, #0
-; CHECK-SD-NEXT: ssra.8h v0, v2, #15
-; CHECK-SD-NEXT: eor.16b v0, v1, v0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: pr88784:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: usubl.8h v0, v0, v1
-; CHECK-GI-NEXT: sshr.8h v1, v2, #15
-; CHECK-GI-NEXT: ssra.8h v0, v2, #15
-; CHECK-GI-NEXT: eor.16b v0, v1, v0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: pr88784:
+; CHECK: // %bb.0:
+; CHECK-NEXT: usubl.8h v0, v0, v1
+; CHECK-NEXT: cmlt.8h v1, v2, #0
+; CHECK-NEXT: ssra.8h v0, v2, #15
+; CHECK-NEXT: eor.16b v0, v1, v0
+; CHECK-NEXT: ret
%l4 = zext <8 x i8> %l0 to <8 x i16>
%l5 = ashr <8 x i16> %l2, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%l6 = zext <8 x i8> %l1 to <8 x i16>
@@ -1947,7 +1939,7 @@ define <8 x i16> @pr88784_fixed(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
; CHECK-GI-LABEL: pr88784_fixed:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: usubl.8h v0, v0, v1
-; CHECK-GI-NEXT: sshr.8h v1, v0, #15
+; CHECK-GI-NEXT: cmlt.8h v1, v0, #0
; CHECK-GI-NEXT: ssra.8h v0, v0, #15
; CHECK-GI-NEXT: eor.16b v0, v1, v0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index 11fb732..938712a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -1103,20 +1103,12 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
}
define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn8b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn8b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn8b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%sum = add <8 x i16> %tmp1, %tmp2
@@ -1126,20 +1118,12 @@ define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind {
}
define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn4h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn4h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn4h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%sum = add <4 x i32> %tmp1, %tmp2
@@ -1149,20 +1133,12 @@ define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind {
}
define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: addhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: addhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum = add <2 x i64> %tmp1, %tmp2
@@ -1172,22 +1148,13 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind {
}
define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_16b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.16b, v1.8h, v2.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_16b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_16b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.16b, v1.8h, v2.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%sum = add <8 x i16> %tmp1, %tmp2
@@ -1198,22 +1165,13 @@ define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
}
define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_8h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.8h, v1.4s, v2.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_8h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_8h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.8h, v1.4s, v2.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%sum = add <4 x i32> %tmp1, %tmp2
@@ -1224,22 +1182,13 @@ define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: addhn2_4s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: addhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn2_4s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: add v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn2_4s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: addhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum = add <2 x i64> %tmp1, %tmp2
@@ -1250,22 +1199,13 @@ define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind {
-; CHECK-SD-LABEL: addhn_addhn2_4s:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: addhn v0.2s, v1.2d, v2.2d
-; CHECK-SD-NEXT: addhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: addhn_addhn2_4s:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v1.2d, #32
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: addhn_addhn2_4s:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: addhn v0.2s, v1.2d, v2.2d
+; CHECK-NEXT: addhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%sum1 = add <2 x i64> %tmp1, %tmp2
@@ -1281,20 +1221,12 @@ define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind {
}
define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn8b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.8b, v0.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn8b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn8b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.8b, v0.8h, v1.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%diff = sub <8 x i16> %tmp1, %tmp2
@@ -1304,20 +1236,12 @@ define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind {
}
define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn4h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.4h, v0.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn4h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn4h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%diff = sub <4 x i32> %tmp1, %tmp2
@@ -1327,20 +1251,12 @@ define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind {
}
define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q0, [x0]
-; CHECK-SD-NEXT: ldr q1, [x1]
-; CHECK-SD-NEXT: subhn v0.2s, v0.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q0, [x0]
-; CHECK-GI-NEXT: ldr q1, [x1]
-; CHECK-GI-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: subhn v0.2s, v0.2d, v1.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%diff = sub <2 x i64> %tmp1, %tmp2
@@ -1350,22 +1266,13 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind {
}
define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_16b_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.16b, v1.8h, v2.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_16b_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_16b_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.16b, v1.8h, v2.8h
+; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%diff = sub <8 x i16> %tmp1, %tmp2
@@ -1376,22 +1283,13 @@ define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
}
define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_8h_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.8h, v1.4s, v2.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_8h_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_8h_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.8h, v1.4s, v2.4s
+; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%diff = sub <4 x i32> %tmp1, %tmp2
@@ -1402,22 +1300,13 @@ define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
}
define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
-; CHECK-SD-LABEL: subhn2_4s_natural:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: ldr q1, [x0]
-; CHECK-SD-NEXT: ldr q2, [x1]
-; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: subhn2 v0.4s, v1.2d, v2.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: subhn2_4s_natural:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr q1, [x0]
-; CHECK-GI-NEXT: ldr q2, [x1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: sub v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: subhn2_4s_natural:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q1, [x0]
+; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: subhn2 v0.4s, v1.2d, v2.2d
+; CHECK-NEXT: ret
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%diff = sub <2 x i64> %tmp1, %tmp2
@@ -1428,20 +1317,12 @@ define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
}
define <16 x i8> @neg_narrow_i8(<16 x i16> %a) {
-; CHECK-SD-LABEL: neg_narrow_i8:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.8b, v2.8h, v0.8h
-; CHECK-SD-NEXT: subhn2 v0.16b, v2.8h, v1.8h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i8:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.8b, v0.8h, #8
-; CHECK-GI-NEXT: shrn2 v0.16b, v1.8h, #8
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.8b, v2.8h, v0.8h
+; CHECK-NEXT: subhn2 v0.16b, v2.8h, v1.8h
+; CHECK-NEXT: ret
%not.i = xor <16 x i16> %a, splat (i16 -1)
%s = lshr <16 x i16> %not.i, splat (i16 8)
%vshrn_n = trunc nuw <16 x i16> %s to <16 x i8>
@@ -1449,20 +1330,12 @@ define <16 x i8> @neg_narrow_i8(<16 x i16> %a) {
}
define <8 x i16> @neg_narrow_i16(<8 x i32> %a) {
-; CHECK-SD-LABEL: neg_narrow_i16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.4h, v2.4s, v0.4s
-; CHECK-SD-NEXT: subhn2 v0.8h, v2.4s, v1.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.4h, v0.4s, #16
-; CHECK-GI-NEXT: shrn2 v0.8h, v1.4s, #16
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT: subhn2 v0.8h, v2.4s, v1.4s
+; CHECK-NEXT: ret
%not.i = xor <8 x i32> %a, splat (i32 -1)
%s = lshr <8 x i32> %not.i, splat (i32 16)
%vshrn_n = trunc nuw <8 x i32> %s to <8 x i16>
@@ -1470,20 +1343,12 @@ define <8 x i16> @neg_narrow_i16(<8 x i32> %a) {
}
define <4 x i32> @neg_narrow_i32(<4 x i64> %a) {
-; CHECK-SD-LABEL: neg_narrow_i32:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-SD-NEXT: subhn v0.2s, v2.2d, v0.2d
-; CHECK-SD-NEXT: subhn2 v0.4s, v2.2d, v1.2d
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: neg_narrow_i32:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: mvn v0.16b, v0.16b
-; CHECK-GI-NEXT: mvn v1.16b, v1.16b
-; CHECK-GI-NEXT: shrn v0.2s, v0.2d, #32
-; CHECK-GI-NEXT: shrn2 v0.4s, v1.2d, #32
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: neg_narrow_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT: subhn v0.2s, v2.2d, v0.2d
+; CHECK-NEXT: subhn2 v0.4s, v2.2d, v1.2d
+; CHECK-NEXT: ret
%not.i = xor <4 x i64> %a, splat (i64 -1)
%s = lshr <4 x i64> %not.i, splat (i64 32)
%vshrn_n = trunc nuw <4 x i64> %s to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index 9d0ade2..dc88f94 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -66,9 +66,9 @@ define <4 x i32> @combine_vec_sdiv_by_minsigned(<4 x i32> %x) {
;
; CHECK-GI-LABEL: combine_vec_sdiv_by_minsigned:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v0.4s, #0
; CHECK-GI-NEXT: usra v0.4s, v1.4s, #1
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: neg v0.4s, v0.4s
; CHECK-GI-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
@@ -176,7 +176,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[2], w9
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
@@ -185,39 +185,24 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
}
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
-; CHECK-SD-LABEL: combine_vec_sdiv_by_pow2a:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v1.4s, v0.4s, #0
-; CHECK-SD-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2a:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
-; CHECK-GI-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: combine_vec_sdiv_by_pow2a:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.4s, v0.4s, #0
+; CHECK-NEXT: usra v0.4s, v1.4s, #30
+; CHECK-NEXT: sshr v0.4s, v0.4s, #2
+; CHECK-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
ret <4 x i32> %1
}
define <4 x i32> @combine_vec_sdiv_by_pow2a_neg(<4 x i32> %x) {
-; CHECK-SD-LABEL: combine_vec_sdiv_by_pow2a_neg:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v1.4s, v0.4s, #0
-; CHECK-SD-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-SD-NEXT: neg v0.4s, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2a_neg:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v1.4s, v0.4s, #31
-; CHECK-GI-NEXT: usra v0.4s, v1.4s, #30
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #2
-; CHECK-GI-NEXT: neg v0.4s, v0.4s
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: combine_vec_sdiv_by_pow2a_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v1.4s, v0.4s, #0
+; CHECK-NEXT: usra v0.4s, v1.4s, #30
+; CHECK-NEXT: sshr v0.4s, v0.4s, #2
+; CHECK-NEXT: neg v0.4s, v0.4s
+; CHECK-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 -4, i32 -4, i32 -4, i32 -4>
ret <4 x i32> %1
}
@@ -240,7 +225,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI14_1
-; CHECK-GI-NEXT: sshr v2.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v0.16b, #0
; CHECK-GI-NEXT: adrp x9, .LCPI14_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1]
; CHECK-GI-NEXT: adrp x8, .LCPI14_2
@@ -252,7 +237,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
; CHECK-GI-NEXT: neg v2.16b, v2.16b
; CHECK-GI-NEXT: add v1.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: sshl v1.16b, v1.16b, v2.16b
-; CHECK-GI-NEXT: sshr v2.16b, v3.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v3.16b, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <16 x i8> %x, <i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2, i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2>
@@ -278,7 +263,7 @@ define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI15_1
-; CHECK-GI-NEXT: sshr v2.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v2.8h, v0.8h, #0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_1]
; CHECK-GI-NEXT: adrp x8, .LCPI15_0
; CHECK-GI-NEXT: ldr d3, [x8, :lo12:.LCPI15_0]
@@ -291,7 +276,7 @@ define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
; CHECK-GI-NEXT: add v1.8h, v0.8h, v1.8h
; CHECK-GI-NEXT: shl v2.8h, v2.8h, #15
; CHECK-GI-NEXT: sshl v1.8h, v1.8h, v3.8h
-; CHECK-GI-NEXT: sshr v2.8h, v2.8h, #15
+; CHECK-GI-NEXT: cmlt v2.8h, v2.8h, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <8 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2>
@@ -322,8 +307,8 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI16_1
-; CHECK-GI-NEXT: sshr v3.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v4.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v3.8h, v0.8h, #0
+; CHECK-GI-NEXT: cmlt v4.8h, v1.8h, #0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI16_1]
; CHECK-GI-NEXT: adrp x8, .LCPI16_0
; CHECK-GI-NEXT: ldr d5, [x8, :lo12:.LCPI16_0]
@@ -339,7 +324,7 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
; CHECK-GI-NEXT: add v2.8h, v1.8h, v2.8h
; CHECK-GI-NEXT: sshl v3.8h, v3.8h, v4.8h
; CHECK-GI-NEXT: sshl v2.8h, v2.8h, v4.8h
-; CHECK-GI-NEXT: sshr v4.8h, v5.8h, #15
+; CHECK-GI-NEXT: cmlt v4.8h, v5.8h, #0
; CHECK-GI-NEXT: bif v0.16b, v3.16b, v4.16b
; CHECK-GI-NEXT: bif v1.16b, v2.16b, v4.16b
; CHECK-GI-NEXT: ret
@@ -381,12 +366,12 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI17_1
-; CHECK-GI-NEXT: sshr v5.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v6.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v5.8h, v0.8h, #0
+; CHECK-GI-NEXT: cmlt v6.8h, v1.8h, #0
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI17_1]
; CHECK-GI-NEXT: adrp x8, .LCPI17_0
-; CHECK-GI-NEXT: sshr v7.8h, v2.8h, #15
-; CHECK-GI-NEXT: sshr v16.8h, v3.8h, #15
+; CHECK-GI-NEXT: cmlt v7.8h, v2.8h, #0
+; CHECK-GI-NEXT: cmlt v16.8h, v3.8h, #0
; CHECK-GI-NEXT: ldr d17, [x8, :lo12:.LCPI17_0]
; CHECK-GI-NEXT: adrp x8, .LCPI17_2
; CHECK-GI-NEXT: neg v4.8h, v4.8h
@@ -402,7 +387,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
; CHECK-GI-NEXT: add v6.8h, v1.8h, v6.8h
; CHECK-GI-NEXT: add v7.8h, v2.8h, v7.8h
; CHECK-GI-NEXT: add v4.8h, v3.8h, v4.8h
-; CHECK-GI-NEXT: sshr v17.8h, v17.8h, #15
+; CHECK-GI-NEXT: cmlt v17.8h, v17.8h, #0
; CHECK-GI-NEXT: sshl v5.8h, v5.8h, v16.8h
; CHECK-GI-NEXT: sshl v6.8h, v6.8h, v16.8h
; CHECK-GI-NEXT: sshl v7.8h, v7.8h, v16.8h
@@ -436,7 +421,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: adrp x8, .LCPI18_0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI18_0]
@@ -451,7 +436,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -483,10 +468,10 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v4.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v4.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s2, w8
; CHECK-GI-NEXT: adrp x8, .LCPI19_0
-; CHECK-GI-NEXT: sshr v5.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v5.4s, v1.4s, #0
; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI19_0]
; CHECK-GI-NEXT: adrp x8, .LCPI19_1
; CHECK-GI-NEXT: mov v2.h[1], w9
@@ -503,7 +488,7 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; CHECK-GI-NEXT: sshl v3.4s, v3.4s, v5.4s
; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
+; CHECK-GI-NEXT: cmlt v2.4s, v2.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v4.16b, v2.16b
; CHECK-GI-NEXT: bif v1.16b, v3.16b, v2.16b
; CHECK-GI-NEXT: ret
@@ -546,13 +531,13 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v6.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v6.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s4, w8
; CHECK-GI-NEXT: adrp x8, .LCPI20_0
-; CHECK-GI-NEXT: sshr v7.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v7.4s, v1.4s, #0
; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI20_0]
-; CHECK-GI-NEXT: sshr v16.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v17.4s, v3.4s, #31
+; CHECK-GI-NEXT: cmlt v16.4s, v2.4s, #0
+; CHECK-GI-NEXT: cmlt v17.4s, v3.4s, #0
; CHECK-GI-NEXT: adrp x8, .LCPI20_1
; CHECK-GI-NEXT: mov v4.h[1], w9
; CHECK-GI-NEXT: neg v5.4s, v5.4s
@@ -574,7 +559,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; CHECK-GI-NEXT: sshl v5.4s, v5.4s, v17.4s
; CHECK-GI-NEXT: ushll v4.4s, v4.4h, #0
; CHECK-GI-NEXT: shl v4.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v4.4s, v4.4s, #31
+; CHECK-GI-NEXT: cmlt v4.4s, v4.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v6.16b, v4.16b
; CHECK-GI-NEXT: bif v1.16b, v7.16b, v4.16b
; CHECK-GI-NEXT: bif v2.16b, v16.16b, v4.16b
@@ -603,7 +588,7 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI21_1
-; CHECK-GI-NEXT: sshr v2.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v2.2d, v0.2d, #0
; CHECK-GI-NEXT: adrp x9, .LCPI21_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI21_1]
; CHECK-GI-NEXT: adrp x8, .LCPI21_2
@@ -615,7 +600,7 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
; CHECK-GI-NEXT: neg v2.2d, v2.2d
; CHECK-GI-NEXT: add v1.2d, v0.2d, v1.2d
; CHECK-GI-NEXT: sshl v1.2d, v1.2d, v2.2d
-; CHECK-GI-NEXT: sshr v2.2d, v3.2d, #63
+; CHECK-GI-NEXT: cmlt v2.2d, v3.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <2 x i64> %x, <i64 1, i64 4>
@@ -649,7 +634,7 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
; CHECK-GI-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI22_2
-; CHECK-GI-NEXT: sshr v3.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v3.2d, v0.2d, #0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI22_2]
; CHECK-GI-NEXT: adrp x8, .LCPI22_1
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI22_1]
@@ -662,13 +647,13 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
; CHECK-GI-NEXT: adrp x8, .LCPI22_3
; CHECK-GI-NEXT: neg v5.2d, v5.2d
; CHECK-GI-NEXT: ushl v2.2d, v3.2d, v2.2d
-; CHECK-GI-NEXT: sshr v3.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v3.2d, v1.2d, #0
; CHECK-GI-NEXT: shl v6.2d, v6.2d, #63
; CHECK-GI-NEXT: add v2.2d, v0.2d, v2.2d
; CHECK-GI-NEXT: ushl v3.2d, v3.2d, v4.2d
; CHECK-GI-NEXT: ldr q4, [x8, :lo12:.LCPI22_3]
; CHECK-GI-NEXT: sshl v2.2d, v2.2d, v5.2d
-; CHECK-GI-NEXT: sshr v5.2d, v6.2d, #63
+; CHECK-GI-NEXT: cmlt v5.2d, v6.2d, #0
; CHECK-GI-NEXT: add v1.2d, v1.2d, v3.2d
; CHECK-GI-NEXT: neg v3.2d, v4.2d
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v5.16b
@@ -715,13 +700,13 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: mov w9, #0 // =0x0
-; CHECK-GI-NEXT: sshr v7.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v7.2d, v0.2d, #0
; CHECK-GI-NEXT: fmov s4, w8
; CHECK-GI-NEXT: adrp x8, .LCPI23_1
-; CHECK-GI-NEXT: sshr v16.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v16.2d, v1.2d, #0
; CHECK-GI-NEXT: ldr q5, [x8, :lo12:.LCPI23_1]
-; CHECK-GI-NEXT: sshr v17.2d, v2.2d, #63
-; CHECK-GI-NEXT: sshr v18.2d, v3.2d, #63
+; CHECK-GI-NEXT: cmlt v17.2d, v2.2d, #0
+; CHECK-GI-NEXT: cmlt v18.2d, v3.2d, #0
; CHECK-GI-NEXT: adrp x8, .LCPI23_3
; CHECK-GI-NEXT: mov v4.h[1], w9
; CHECK-GI-NEXT: neg v5.2d, v5.2d
@@ -754,9 +739,9 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
; CHECK-GI-NEXT: shl v4.2d, v4.2d, #63
; CHECK-GI-NEXT: sshl v16.2d, v16.2d, v20.2d
; CHECK-GI-NEXT: sshl v6.2d, v6.2d, v20.2d
-; CHECK-GI-NEXT: sshr v17.2d, v17.2d, #63
-; CHECK-GI-NEXT: sshr v18.2d, v18.2d, #63
-; CHECK-GI-NEXT: sshr v4.2d, v4.2d, #63
+; CHECK-GI-NEXT: cmlt v17.2d, v17.2d, #0
+; CHECK-GI-NEXT: cmlt v18.2d, v18.2d, #0
+; CHECK-GI-NEXT: cmlt v4.2d, v4.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v7.16b, v17.16b
; CHECK-GI-NEXT: bif v1.16b, v16.16b, v18.16b
; CHECK-GI-NEXT: bif v2.16b, v5.16b, v4.16b
@@ -792,7 +777,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
; CHECK-GI-NEXT: adrp x10, .LCPI24_0
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: ldr q2, [x10, :lo12:.LCPI24_0]
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: fmov s4, w9
; CHECK-GI-NEXT: adrp x10, .LCPI24_1
; CHECK-GI-NEXT: neg v2.4s, v2.4s
@@ -807,10 +792,10 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
; CHECK-GI-NEXT: mov v1.s[3], w9
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: shl v1.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: neg v2.4s, v0.4s
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -871,7 +856,7 @@ define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
; CHECK-GI-NEXT: neg v2.16b, v0.16b
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI25_0]
; CHECK-GI-NEXT: shl v1.16b, v1.16b, #7
-; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: cmlt v1.16b, v1.16b, #0
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -901,7 +886,7 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; CHECK-GI-LABEL: non_splat_minus_one_divisor_1:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: adrp x8, .LCPI26_2
-; CHECK-GI-NEXT: sshr v2.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v0.16b, #0
; CHECK-GI-NEXT: adrp x9, .LCPI26_1
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI26_2]
; CHECK-GI-NEXT: adrp x8, .LCPI26_3
@@ -914,11 +899,11 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; CHECK-GI-NEXT: neg v2.16b, v2.16b
; CHECK-GI-NEXT: add v1.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: sshl v1.16b, v1.16b, v2.16b
-; CHECK-GI-NEXT: sshr v2.16b, v3.16b, #7
+; CHECK-GI-NEXT: cmlt v2.16b, v3.16b, #0
; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI26_0]
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: shl v1.16b, v3.16b, #7
-; CHECK-GI-NEXT: sshr v1.16b, v1.16b, #7
+; CHECK-GI-NEXT: cmlt v1.16b, v1.16b, #0
; CHECK-GI-NEXT: neg v2.16b, v0.16b
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -954,7 +939,7 @@ define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
; CHECK-GI-NEXT: fmov s1, w8
; CHECK-GI-NEXT: ldr q2, [x9, :lo12:.LCPI27_0]
; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: sshr v3.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v3.4s, v0.4s, #0
; CHECK-GI-NEXT: adrp x9, .LCPI27_1
; CHECK-GI-NEXT: neg v2.4s, v2.4s
; CHECK-GI-NEXT: mov v1.s[1], w8
@@ -969,10 +954,10 @@ define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
; CHECK-GI-NEXT: sshl v2.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: mov v4.s[3], w8
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: shl v1.4s, v4.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: neg v2.4s, v0.4s
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
@@ -1207,7 +1192,7 @@ define <8 x i16> @combine_vec_sdiv_nonuniform7(<8 x i16> %x) {
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI34_0]
; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
; CHECK-GI-NEXT: shl v1.8h, v1.8h, #15
-; CHECK-GI-NEXT: sshr v1.8h, v1.8h, #15
+; CHECK-GI-NEXT: cmlt v1.8h, v1.8h, #0
; CHECK-GI-NEXT: bit v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
%1 = sdiv <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 1, i16 1, i16 1, i16 1>
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 121cc30..babb4ed 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -605,7 +605,7 @@ define i32 @extract_v4i32_select(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x i1> %c
; CHECK-GI-NEXT: mov w8, w0
; CHECK-GI-NEXT: and x8, x8, #0x3
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: str q0, [sp]
; CHECK-GI-NEXT: ldr w0, [x9, x8, lsl #2]
@@ -634,7 +634,7 @@ define i32 @extract_v4i32_select_const(<4 x i32> %a, <4 x i32> %b, i32 %c, <4 x
; CHECK-GI-NEXT: adrp x8, .LCPI23_0
; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI23_0]
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: mov s0, v0.s[2]
; CHECK-GI-NEXT: fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index 6d673f1..30fb82e 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -661,7 +661,7 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
; CHECK-GI-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
; CHECK-GI-NEXT: mov v0.d[1], x8
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: bsl v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: add sp, sp, #80
; CHECK-GI-NEXT: ret
@@ -1540,7 +1540,7 @@ define <7 x i32> @v7f16_i32(<7 x half> %a, <7 x half> %b, <7 x i32> %d, <7 x i32
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: mov v1.s[2], w8
; CHECK-GI-FP16-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: fmov s4, w8
; CHECK-GI-FP16-NEXT: mov v4.s[1], w8
; CHECK-GI-FP16-NEXT: ushl v1.4s, v1.4s, v2.4s
@@ -1602,7 +1602,7 @@ define <4 x i32> @v4f16_i32(<4 x half> %a, <4 x half> %b, <4 x i32> %d, <4 x i32
; CHECK-GI-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h
; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: bsl v0.16b, v2.16b, v3.16b
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -1657,8 +1657,8 @@ define <8 x i32> @v8f16_i32(<8 x half> %a, <8 x half> %b, <8 x i32> %d, <8 x i32
; CHECK-GI-FP16-NEXT: ushll2 v0.4s, v0.8h, #0
; CHECK-GI-FP16-NEXT: shl v1.4s, v1.4s, #31
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v1.4s, v1.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v6.4s, v0.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v1.4s, v1.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v6.4s, v0.4s, #0
; CHECK-GI-FP16-NEXT: mov v0.16b, v1.16b
; CHECK-GI-FP16-NEXT: mov v1.16b, v6.16b
; CHECK-GI-FP16-NEXT: bsl v0.16b, v2.16b, v4.16b
@@ -1748,10 +1748,10 @@ define <16 x i32> @v16f16_i32(<16 x half> %a, <16 x half> %b, <16 x i32> %d, <16
; CHECK-GI-FP16-NEXT: shl v0.4s, v0.4s, #31
; CHECK-GI-FP16-NEXT: shl v3.4s, v3.4s, #31
; CHECK-GI-FP16-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v2.4s, v2.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v16.4s, v0.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v3.4s, v3.4s, #31
-; CHECK-GI-FP16-NEXT: sshr v17.4s, v1.4s, #31
+; CHECK-GI-FP16-NEXT: cmlt v2.4s, v2.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v16.4s, v0.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v3.4s, v3.4s, #0
+; CHECK-GI-FP16-NEXT: cmlt v17.4s, v1.4s, #0
; CHECK-GI-FP16-NEXT: ldp q0, q1, [sp]
; CHECK-GI-FP16-NEXT: bit v0.16b, v4.16b, v2.16b
; CHECK-GI-FP16-NEXT: mov v2.16b, v3.16b
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat.ll b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
index 00de153..24be923 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
@@ -111,14 +111,14 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; CHECK-CVT-LABEL: utest_f16i32:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: fcvtzu w0, s0
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i32:
+; CHECK-FP16-LABEL: utest_f16i32:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w0, h0
; CHECK-FP16-NEXT: ret
@@ -298,8 +298,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) {
+; CHECK-CVT-LABEL: utest_f16i16:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: mov w9, #65535 // =0xffff
@@ -308,7 +308,7 @@ define i16 @utesth_f16i16(half %x) {
; CHECK-CVT-NEXT: csel w0, w8, w9, lo
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i16:
+; CHECK-FP16-LABEL: utest_f16i16:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w8, h0
; CHECK-FP16-NEXT: mov w9, #65535 // =0xffff
@@ -493,8 +493,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) {
-; CHECK-LABEL: utesth_f16i64:
+define i64 @utest_f16i64(half %x) {
+; CHECK-LABEL: utest_f16i64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -636,14 +636,14 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i32_mm:
+define i32 @utest_f16i32_mm(half %x) {
+; CHECK-CVT-LABEL: utest_f16i32_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: fcvtzu w0, s0
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-LABEL: utest_f16i32_mm:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w0, h0
; CHECK-FP16-NEXT: ret
@@ -808,8 +808,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) {
-; CHECK-CVT-LABEL: utesth_f16i16_mm:
+define i16 @utest_f16i16_mm(half %x) {
+; CHECK-CVT-LABEL: utest_f16i16_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvt s0, h0
; CHECK-CVT-NEXT: mov w9, #65535 // =0xffff
@@ -818,7 +818,7 @@ define i16 @utesth_f16i16_mm(half %x) {
; CHECK-CVT-NEXT: csel w0, w8, w9, lo
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-LABEL: utest_f16i16_mm:
; CHECK-FP16: // %bb.0: // %entry
; CHECK-FP16-NEXT: fcvtzu w8, h0
; CHECK-FP16-NEXT: mov w9, #65535 // =0xffff
@@ -986,8 +986,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
+define i64 @utest_f16i64_mm(half %x) {
+; CHECK-LABEL: utest_f16i64_mm:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -1026,6 +1026,29 @@ entry:
ret i64 %conv6
}
+; i32 non-saturating
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; CHECK-CVT-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT: // %bb.0:
+; CHECK-CVT-NEXT: fcvt s0, h0
+; CHECK-CVT-NEXT: fcvtzs w8, s0
+; CHECK-CVT-NEXT: and w8, w8, w8, asr #31
+; CHECK-CVT-NEXT: bic w0, w8, w8, asr #31
+; CHECK-CVT-NEXT: ret
+;
+; CHECK-FP16-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16: // %bb.0:
+; CHECK-FP16-NEXT: fcvtzs w8, h0
+; CHECK-FP16-NEXT: and w8, w8, w8, asr #31
+; CHECK-FP16-NEXT: bic w0, w8, w8, asr #31
+; CHECK-FP16-NEXT: ret
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index b09a867..637c028 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -321,20 +321,20 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i32:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i32:
+; CHECK-FP16-SD-LABEL: utest_f16i32:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-FP16-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i32:
+; CHECK-CVT-GI-LABEL: utest_f16i32:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-GI-NEXT: movi v1.2d, #0x000000ffffffff
@@ -349,7 +349,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-CVT-GI-NEXT: uzp1 v0.4s, v2.4s, v0.4s
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i32:
+; CHECK-FP16-GI-LABEL: utest_f16i32:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h2, v0.h[1]
@@ -614,8 +614,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-CVT-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-CVT-LABEL: utest_f16i16:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h
; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
@@ -625,12 +625,12 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-CVT-NEXT: uqxtn2 v0.8h, v2.4s
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i16:
+; CHECK-FP16-SD-LABEL: utest_f16i16:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i16:
+; CHECK-FP16-GI-LABEL: utest_f16i16:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: fcvtl v1.4s, v0.4h
; CHECK-FP16-GI-NEXT: fcvtl2 v0.4s, v0.8h
@@ -1746,8 +1746,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i64:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: sub sp, sp, #48
; CHECK-CVT-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -1777,7 +1777,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-CVT-SD-NEXT: add sp, sp, #48
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i64:
+; CHECK-FP16-SD-LABEL: utest_f16i64:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: sub sp, sp, #48
; CHECK-FP16-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -1807,7 +1807,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-FP16-SD-NEXT: add sp, sp, #48
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i64:
+; CHECK-CVT-GI-LABEL: utest_f16i64:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
@@ -1819,7 +1819,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-CVT-GI-NEXT: mov v0.d[1], x9
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i64:
+; CHECK-FP16-GI-LABEL: utest_f16i64:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
@@ -2307,20 +2307,20 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i32_mm:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i32_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
; CHECK-FP16-SD-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i32_mm:
+; CHECK-CVT-GI-LABEL: utest_f16i32_mm:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
; CHECK-CVT-GI-NEXT: movi v1.2d, #0x000000ffffffff
@@ -2335,7 +2335,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-CVT-GI-NEXT: uzp1 v0.4s, v2.4s, v0.4s
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i32_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h2, v0.h[1]
@@ -2585,8 +2585,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-CVT-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-CVT-LABEL: utest_f16i16_mm:
; CHECK-CVT: // %bb.0: // %entry
; CHECK-CVT-NEXT: fcvtl v1.4s, v0.4h
; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h
@@ -2596,12 +2596,12 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-CVT-NEXT: uqxtn2 v0.8h, v2.4s
; CHECK-CVT-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i16_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: fcvtzu v0.8h, v0.8h
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i16_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: fcvtl v1.4s, v0.4h
; CHECK-FP16-GI-NEXT: fcvtl2 v0.4s, v0.8h
@@ -3694,8 +3694,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-CVT-SD-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-CVT-SD-LABEL: utest_f16i64_mm:
; CHECK-CVT-SD: // %bb.0: // %entry
; CHECK-CVT-SD-NEXT: sub sp, sp, #48
; CHECK-CVT-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -3725,7 +3725,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-CVT-SD-NEXT: add sp, sp, #48
; CHECK-CVT-SD-NEXT: ret
;
-; CHECK-FP16-SD-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-SD-LABEL: utest_f16i64_mm:
; CHECK-FP16-SD: // %bb.0: // %entry
; CHECK-FP16-SD-NEXT: sub sp, sp, #48
; CHECK-FP16-SD-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
@@ -3755,7 +3755,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-FP16-SD-NEXT: add sp, sp, #48
; CHECK-FP16-SD-NEXT: ret
;
-; CHECK-CVT-GI-LABEL: utesth_f16i64_mm:
+; CHECK-CVT-GI-LABEL: utest_f16i64_mm:
; CHECK-CVT-GI: // %bb.0: // %entry
; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-CVT-GI-NEXT: mov h1, v0.h[1]
@@ -3767,7 +3767,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-CVT-GI-NEXT: mov v0.d[1], x9
; CHECK-CVT-GI-NEXT: ret
;
-; CHECK-FP16-GI-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-GI-LABEL: utest_f16i64_mm:
; CHECK-FP16-GI: // %bb.0: // %entry
; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-FP16-GI-NEXT: mov h1, v0.h[1]
@@ -3941,6 +3941,51 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non-saturating
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-CVT-SD-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT-SD: // %bb.0: // %entry
+; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-SD-NEXT: movi v1.2d, #0000000000000000
+; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-SD-NEXT: smin v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-CVT-SD-NEXT: ret
+;
+; CHECK-FP16-SD-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16-SD: // %bb.0: // %entry
+; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-SD-NEXT: movi v1.2d, #0000000000000000
+; CHECK-FP16-SD-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-FP16-SD-NEXT: smin v0.4s, v0.4s, v1.4s
+; CHECK-FP16-SD-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-FP16-SD-NEXT: ret
+;
+; CHECK-CVT-GI-LABEL: ustest_f16i32_nsat:
+; CHECK-CVT-GI: // %bb.0: // %entry
+; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-CVT-GI-NEXT: movi v1.2d, #0000000000000000
+; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-CVT-GI-NEXT: smin v0.4s, v1.4s, v0.4s
+; CHECK-CVT-GI-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-CVT-GI-NEXT: ret
+;
+; CHECK-FP16-GI-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16-GI: // %bb.0: // %entry
+; CHECK-FP16-GI-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-FP16-GI-NEXT: movi v1.2d, #0000000000000000
+; CHECK-FP16-GI-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-FP16-GI-NEXT: smin v0.4s, v1.4s, v0.4s
+; CHECK-FP16-GI-NEXT: smax v0.4s, v0.4s, v1.4s
+; CHECK-FP16-GI-NEXT: ret
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 0c84468f..2026959 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1110,7 +1110,7 @@ define <8 x i8> @vselect_constant_cond_zero_v8i8(<8 x i8> %a) {
; CHECK-GI-NEXT: adrp x8, .LCPI83_0
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI83_0]
; CHECK-GI-NEXT: shl v1.8b, v1.8b, #7
-; CHECK-GI-NEXT: sshr v1.8b, v1.8b, #7
+; CHECK-GI-NEXT: cmlt v1.8b, v1.8b, #0
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: ret
%b = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i8> %a, <8 x i8> zeroinitializer
@@ -1133,7 +1133,7 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) {
; CHECK-GI-NEXT: mov v1.h[2], w9
; CHECK-GI-NEXT: mov v1.h[3], w8
; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15
-; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15
+; CHECK-GI-NEXT: cmlt v1.4h, v1.4h, #0
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: ret
%b = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %a, <4 x i16> zeroinitializer
@@ -1157,7 +1157,7 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) {
; CHECK-GI-NEXT: mov v1.s[2], w9
; CHECK-GI-NEXT: mov v1.s[3], w8
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
%b = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a, <4 x i32> zeroinitializer
@@ -1176,7 +1176,7 @@ define <8 x i8> @vselect_constant_cond_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-GI-NEXT: adrp x8, .LCPI86_0
; CHECK-GI-NEXT: ldr d2, [x8, :lo12:.LCPI86_0]
; CHECK-GI-NEXT: shl v2.8b, v2.8b, #7
-; CHECK-GI-NEXT: sshr v2.8b, v2.8b, #7
+; CHECK-GI-NEXT: cmlt v2.8b, v2.8b, #0
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-GI-NEXT: ret
%c = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i8> %a, <8 x i8> %b
@@ -1199,7 +1199,7 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-GI-NEXT: mov v2.h[2], w9
; CHECK-GI-NEXT: mov v2.h[3], w8
; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15
-; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15
+; CHECK-GI-NEXT: cmlt v2.4h, v2.4h, #0
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
; CHECK-GI-NEXT: ret
%c = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %a, <4 x i16> %b
@@ -1223,7 +1223,7 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-GI-NEXT: mov v2.s[2], w9
; CHECK-GI-NEXT: mov v2.s[3], w8
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
+; CHECK-GI-NEXT: cmlt v2.4s, v2.4s, #0
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-GI-NEXT: ret
%c = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a, <4 x i32> %b
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index fb8b721..11b3b62 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -966,7 +966,7 @@ define <8 x i8> @cmgez8xi8_alt(<8 x i8> %A) {
;
; CHECK-GI-LABEL: cmgez8xi8_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8b, v0.8b, #7
+; CHECK-GI-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <8 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -982,7 +982,7 @@ define <16 x i8> @cmgez16xi8_alt(<16 x i8> %A) {
;
; CHECK-GI-LABEL: cmgez16xi8_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -998,7 +998,7 @@ define <4 x i16> @cmgez4xi16_alt(<4 x i16> %A) {
;
; CHECK-GI-LABEL: cmgez4xi16_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #15
+; CHECK-GI-NEXT: cmlt v0.4h, v0.4h, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <4 x i16> %A, <i16 15, i16 15, i16 15, i16 15>
@@ -1014,7 +1014,7 @@ define <8 x i16> @cmgez8xi16_alt(<8 x i16> %A) {
;
; CHECK-GI-LABEL: cmgez8xi16_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <8 x i16> %A, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -1030,7 +1030,7 @@ define <2 x i32> @cmgez2xi32_alt(<2 x i32> %A) {
;
; CHECK-GI-LABEL: cmgez2xi32_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: cmlt v0.2s, v0.2s, #0
; CHECK-GI-NEXT: mvn v0.8b, v0.8b
; CHECK-GI-NEXT: ret
%sign = ashr <2 x i32> %A, <i32 31, i32 31>
@@ -1046,7 +1046,7 @@ define <4 x i32> @cmgez4xi32_alt(<4 x i32> %A) {
;
; CHECK-GI-LABEL: cmgez4xi32_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
@@ -1062,7 +1062,7 @@ define <2 x i64> @cmgez2xi64_alt(<2 x i64> %A) {
;
; CHECK-GI-LABEL: cmgez2xi64_alt:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: mvn v0.16b, v0.16b
; CHECK-GI-NEXT: ret
%sign = ashr <2 x i64> %A, <i64 63, i64 63>
@@ -1503,99 +1503,64 @@ entry:
}
define <8 x i8> @cmltz8xi8_alt(<8 x i8> %A) {
-; CHECK-SD-LABEL: cmltz8xi8_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.8b, v0.8b, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz8xi8_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8b, v0.8b, #7
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz8xi8_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <8 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <8 x i8> %A.lobit
}
define <16 x i8> @cmltz16xi8_alt(<16 x i8> %A) {
-; CHECK-SD-LABEL: cmltz16xi8_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.16b, v0.16b, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz16xi8_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz16xi8_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %A.lobit
}
define <4 x i16> @cmltz4xi16_alt(<4 x i16> %A) {
-; CHECK-SD-LABEL: cmltz4xi16_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.4h, v0.4h, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz4xi16_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #15
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz4xi16_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <4 x i16> %A, <i16 15, i16 15, i16 15, i16 15>
ret <4 x i16> %A.lobit
}
define <8 x i16> @cmltz8xi16_alt(<8 x i16> %A) {
-; CHECK-SD-LABEL: cmltz8xi16_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.8h, v0.8h, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz8xi16_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz8xi16_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.8h, v0.8h, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <8 x i16> %A, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <8 x i16> %A.lobit
}
define <2 x i32> @cmltz2xi32_alt(<2 x i32> %A) {
-; CHECK-SD-LABEL: cmltz2xi32_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.2s, v0.2s, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz2xi32_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz2xi32_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <2 x i32> %A, <i32 31, i32 31>
ret <2 x i32> %A.lobit
}
define <4 x i32> @cmltz4xi32_alt(<4 x i32> %A) {
-; CHECK-SD-LABEL: cmltz4xi32_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.4s, v0.4s, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz4xi32_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz4xi32_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
ret <4 x i32> %A.lobit
}
define <2 x i64> @cmltz2xi64_alt(<2 x i64> %A) {
-; CHECK-SD-LABEL: cmltz2xi64_alt:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmlt v0.2d, v0.2d, #0
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: cmltz2xi64_alt:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: cmltz2xi64_alt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmlt v0.2d, v0.2d, #0
+; CHECK-NEXT: ret
%A.lobit = ashr <2 x i64> %A, <i64 63, i64 63>
ret <2 x i64> %A.lobit
}
@@ -2523,7 +2488,7 @@ define <2 x i32> @fcmal2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: movi v0.2s, #1
; CHECK-GI-NEXT: shl v0.2s, v0.2s, #31
-; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: cmlt v0.2s, v0.2s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
@@ -2542,7 +2507,7 @@ define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-GI-NEXT: dup v0.2s, w8
; CHECK-GI-NEXT: mov v0.d[1], v0.d[0]
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
@@ -2559,7 +2524,7 @@ define <2 x i64> @fcmal2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-GI-NEXT: adrp x8, .LCPI221_0
; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI221_0]
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp true <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
@@ -2589,7 +2554,7 @@ define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: mov v0.d[1], v0.d[0]
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: ret
%tmp3 = fcmp false <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll b/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
index 282f437..a8c55b4 100644
--- a/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
+++ b/llvm/test/CodeGen/AArch64/neon-shift-left-long.ll
@@ -465,7 +465,7 @@ define <8 x i16> @test_ushll_cmp(<8 x i8> %a, <8 x i8> %b) #0 {
; CHECK-GI-NEXT: movi v1.2d, #0xff00ff00ff00ff
; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-NEXT: shl v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
%cmp.i = icmp eq <8 x i8> %a, %b
diff --git a/llvm/test/CodeGen/AArch64/select_cc.ll b/llvm/test/CodeGen/AArch64/select_cc.ll
index 483f6c2..b562340 100644
--- a/llvm/test/CodeGen/AArch64/select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/select_cc.ll
@@ -98,7 +98,7 @@ define <2 x double> @select_olt_load_cmp(<2 x double> %a, ptr %src) {
; CHECK-GI-NEXT: fcmgt v1.2s, v1.2s, #0.0
; CHECK-GI-NEXT: ushll v1.2d, v1.2s, #0
; CHECK-GI-NEXT: shl v1.2d, v1.2d, #63
-; CHECK-GI-NEXT: sshr v1.2d, v1.2d, #63
+; CHECK-GI-NEXT: cmlt v1.2d, v1.2d, #0
; CHECK-GI-NEXT: bif v0.16b, v2.16b, v1.16b
; CHECK-GI-NEXT: ret
entry:
@@ -136,7 +136,7 @@ define <4 x i32> @select_icmp_sgt(<4 x i32> %a, <4 x i8> %b) {
; CHECK-GI-NEXT: mov v2.s[2], w8
; CHECK-GI-NEXT: mov v2.s[3], w9
; CHECK-GI-NEXT: shl v1.4s, v2.4s, #31
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
+; CHECK-GI-NEXT: cmlt v1.4s, v1.4s, #0
; CHECK-GI-NEXT: bic v0.16b, v0.16b, v1.16b
; CHECK-GI-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 293b74ec..96a7a9d0 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -255,7 +255,7 @@ define <16 x i8> @sel_shift_bool_v16i8(<16 x i1> %t) {
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: shl v0.16b, v0.16b, #7
; CHECK-GI-NEXT: movi v1.16b, #128
-; CHECK-GI-NEXT: sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <16 x i1> %t, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <16 x i8> zeroinitializer
@@ -277,7 +277,7 @@ define <8 x i16> @sel_shift_bool_v8i16(<8 x i1> %t) {
; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-NEXT: movi v1.8h, #128
; CHECK-GI-NEXT: shl v0.8h, v0.8h, #15
-; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT: cmlt v0.8h, v0.8h, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl= select <8 x i1> %t, <8 x i16> <i16 128, i16 128, i16 128, i16 128, i16 128, i16 128, i16 128, i16 128>, <8 x i16> zeroinitializer
@@ -299,7 +299,7 @@ define <4 x i32> @sel_shift_bool_v4i32(<4 x i1> %t) {
; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-NEXT: movi v1.4s, #64
; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: cmlt v0.4s, v0.4s, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <4 x i1> %t, <4 x i32> <i32 64, i32 64, i32 64, i32 64>, <4 x i32> zeroinitializer
@@ -323,7 +323,7 @@ define <2 x i64> @sel_shift_bool_v2i64(<2 x i1> %t) {
; CHECK-GI-NEXT: adrp x8, .LCPI16_0
; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0]
; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
-; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: cmlt v0.2d, v0.2d, #0
; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
; CHECK-GI-NEXT: ret
%shl = select <2 x i1> %t, <2 x i64> <i64 65536, i64 65536>, <2 x i64> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
index 1de8d0a..01e3d3a 100644
--- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
@@ -68,13 +68,12 @@ entry:
}
; SVE calling conventions
-; Predicate register spills end up in FP region, currently. This can be
-; mitigated with the -aarch64-enable-zpr-predicate-spills option.
+; Padding is placed between predicate and FPR/ZPR register spills, so only emit remarks when hazard padding is off.
+; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon).
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call':
; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
@@ -89,7 +88,6 @@ entry:
define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call':
; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 333a8be..bdee359 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects=false -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE
define i32 @basic(i32 noundef %num) {
; CHECK-LABEL: basic:
@@ -1940,23 +1940,22 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
;
; CHECK64-LABEL: svecc_call:
; CHECK64: // %bb.0: // %entry
-; CHECK64-NEXT: sub sp, sp, #128
-; CHECK64-NEXT: .cfi_def_cfa_offset 128
+; CHECK64-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_def_cfa_offset 64
; CHECK64-NEXT: cntd x9
-; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
-; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill
-; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill
-; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill
-; CHECK64-NEXT: add x29, sp, #64
+; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK64-NEXT: mov x29, sp
; CHECK64-NEXT: .cfi_def_cfa w29, 64
-; CHECK64-NEXT: .cfi_offset w19, -16
-; CHECK64-NEXT: .cfi_offset w26, -24
-; CHECK64-NEXT: .cfi_offset w27, -32
-; CHECK64-NEXT: .cfi_offset w28, -40
+; CHECK64-NEXT: .cfi_offset w19, -8
+; CHECK64-NEXT: .cfi_offset w26, -16
+; CHECK64-NEXT: .cfi_offset w27, -24
+; CHECK64-NEXT: .cfi_offset w28, -32
; CHECK64-NEXT: .cfi_offset vg, -48
; CHECK64-NEXT: .cfi_offset w30, -56
; CHECK64-NEXT: .cfi_offset w29, -64
-; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: addvl sp, sp, #-2
; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -1969,30 +1968,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128
+; CHECK64-NEXT: sub sp, sp, #64
+; CHECK64-NEXT: addvl sp, sp, #-16
+; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128
; CHECK64-NEXT: sub sp, sp, #64
; CHECK64-NEXT: mov x8, x0
; CHECK64-NEXT: bl __arm_sme_state
@@ -2014,22 +2015,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: mov w0, #22647 // =0x5877
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: add sp, sp, #64
-; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: add sp, sp, #64
+; CHECK64-NEXT: addvl sp, sp, #16
+; CHECK64-NEXT: .cfi_restore z8
+; CHECK64-NEXT: .cfi_restore z9
+; CHECK64-NEXT: .cfi_restore z10
+; CHECK64-NEXT: .cfi_restore z11
+; CHECK64-NEXT: .cfi_restore z12
+; CHECK64-NEXT: .cfi_restore z13
+; CHECK64-NEXT: .cfi_restore z14
+; CHECK64-NEXT: .cfi_restore z15
; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -2042,20 +2053,11 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: addvl sp, sp, #18
-; CHECK64-NEXT: .cfi_restore z8
-; CHECK64-NEXT: .cfi_restore z9
-; CHECK64-NEXT: .cfi_restore z10
-; CHECK64-NEXT: .cfi_restore z11
-; CHECK64-NEXT: .cfi_restore z12
-; CHECK64-NEXT: .cfi_restore z13
-; CHECK64-NEXT: .cfi_restore z14
-; CHECK64-NEXT: .cfi_restore z15
-; CHECK64-NEXT: .cfi_def_cfa wsp, 128
-; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload
-; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload
-; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
-; CHECK64-NEXT: add sp, sp, #128
+; CHECK64-NEXT: addvl sp, sp, #2
+; CHECK64-NEXT: .cfi_def_cfa wsp, 64
+; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
; CHECK64-NEXT: .cfi_def_cfa_offset 0
; CHECK64-NEXT: .cfi_restore w19
; CHECK64-NEXT: .cfi_restore w26
@@ -2463,23 +2465,22 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
;
; CHECK64-LABEL: svecc_alloca_call:
; CHECK64: // %bb.0: // %entry
-; CHECK64-NEXT: sub sp, sp, #128
-; CHECK64-NEXT: .cfi_def_cfa_offset 128
+; CHECK64-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_def_cfa_offset 64
; CHECK64-NEXT: cntd x9
-; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
-; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill
-; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill
-; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill
-; CHECK64-NEXT: add x29, sp, #64
+; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK64-NEXT: mov x29, sp
; CHECK64-NEXT: .cfi_def_cfa w29, 64
-; CHECK64-NEXT: .cfi_offset w19, -16
-; CHECK64-NEXT: .cfi_offset w26, -24
-; CHECK64-NEXT: .cfi_offset w27, -32
-; CHECK64-NEXT: .cfi_offset w28, -40
+; CHECK64-NEXT: .cfi_offset w19, -8
+; CHECK64-NEXT: .cfi_offset w26, -16
+; CHECK64-NEXT: .cfi_offset w27, -24
+; CHECK64-NEXT: .cfi_offset w28, -32
; CHECK64-NEXT: .cfi_offset vg, -48
; CHECK64-NEXT: .cfi_offset w30, -56
; CHECK64-NEXT: .cfi_offset w29, -64
-; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: addvl sp, sp, #-2
; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2492,30 +2493,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128
+; CHECK64-NEXT: sub sp, sp, #64
+; CHECK64-NEXT: addvl sp, sp, #-16
+; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128
; CHECK64-NEXT: sub sp, sp, #112
; CHECK64-NEXT: bl __arm_sme_state
; CHECK64-NEXT: mov x19, x0
@@ -2536,22 +2539,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: mov w0, #22647 // =0x5877
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: add sp, sp, #112
-; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: add sp, sp, #64
+; CHECK64-NEXT: addvl sp, sp, #16
+; CHECK64-NEXT: .cfi_restore z8
+; CHECK64-NEXT: .cfi_restore z9
+; CHECK64-NEXT: .cfi_restore z10
+; CHECK64-NEXT: .cfi_restore z11
+; CHECK64-NEXT: .cfi_restore z12
+; CHECK64-NEXT: .cfi_restore z13
+; CHECK64-NEXT: .cfi_restore z14
+; CHECK64-NEXT: .cfi_restore z15
; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -2564,20 +2577,11 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: addvl sp, sp, #18
-; CHECK64-NEXT: .cfi_restore z8
-; CHECK64-NEXT: .cfi_restore z9
-; CHECK64-NEXT: .cfi_restore z10
-; CHECK64-NEXT: .cfi_restore z11
-; CHECK64-NEXT: .cfi_restore z12
-; CHECK64-NEXT: .cfi_restore z13
-; CHECK64-NEXT: .cfi_restore z14
-; CHECK64-NEXT: .cfi_restore z15
-; CHECK64-NEXT: .cfi_def_cfa wsp, 128
-; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload
-; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload
-; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
-; CHECK64-NEXT: add sp, sp, #128
+; CHECK64-NEXT: addvl sp, sp, #2
+; CHECK64-NEXT: .cfi_def_cfa wsp, 64
+; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
; CHECK64-NEXT: .cfi_def_cfa_offset 0
; CHECK64-NEXT: .cfi_restore w19
; CHECK64-NEXT: .cfi_restore w26
diff --git a/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll
new file mode 100644
index 0000000..a1065bc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Muls with (-1) as an operand should fold to neg.
+define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.b, p0/m, z0.b
+; CHECK-NEXT: ret
+ %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.h, p0/m, z0.h
+; CHECK-NEXT: ret
+ %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.s, p0/m, z0.s
+; CHECK-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
+ ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.b, p0/m, z0.b
+; CHECK-NEXT: ret
+ %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.h, p0/m, z0.h
+; CHECK-NEXT: ret
+ %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.s, p0/m, z0.s
+; CHECK-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_u_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
+ ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.b, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: neg z1.b, p0/m, z0.b
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1), <vscale x 16 x i8> %a)
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: neg z1.h, p0/m, z0.h
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1), <vscale x 8 x i16> %a)
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: neg z1.s, p0/m, z0.s
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1), <vscale x 4 x i32> %a)
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: mul_neg_fold_different_argument_order_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.d, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: neg z1.d, p0/m, z0.d
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1), <vscale x 2 x i64> %a)
+ ret <vscale x 2 x i64> %1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll
index 41fda6d..efa51ea 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll
@@ -90,26 +90,24 @@ define <3 x i32> @v_load_constant_v3i32_align1(ptr addrspace(4) %ptr) {
; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v10, v[0:1], off offset:8
; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v11, v[0:1], off offset:9
; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v12, v[0:1], off offset:11
-; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v0, v[0:1], off offset:10
+; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v13, v[0:1], off offset:10
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0xa
; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0
-; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v2
+; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v0, v3, 8, v2
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x8
-; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v3, 16, v4 :: v_dual_lshlrev_b32 v2, 24, v5
+; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v2, 16, v4 :: v_dual_lshlrev_b32 v1, 24, v5
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x6
-; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v4, v7, 8, v6
+; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v3, v7, 8, v6
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x4
-; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v6, 16, v8 :: v_dual_lshlrev_b32 v5, 24, v9
+; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v5, 16, v8 :: v_dual_lshlrev_b32 v4, 24, v9
+; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v1, v2, v0
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x2
-; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v7, v11, 8, v10
-; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x1
-; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v8, 24, v12
+; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v10
; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v9, 16, v0
-; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v2, v3, v1
-; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v5, v6, v4
-; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v8, v9, v7
+; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v7, 24, v12 :: v_dual_lshlrev_b32 v8, 16, v13
+; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v4, v5, v3
+; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v7, v8, v6
; GFX1250-NOUNALIGNED-NEXT: s_set_pc_i64 s[30:31]
;
; GFX9-UNALIGNED-LABEL: v_load_constant_v3i32_align1:
@@ -942,7 +940,7 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg
;
; GFX1250-NOUNALIGNED-LABEL: s_load_constant_v3i32_align1:
; GFX1250-NOUNALIGNED: ; %bb.0:
-; GFX1250-NOUNALIGNED-NEXT: s_clause 0xa
+; GFX1250-NOUNALIGNED-NEXT: s_clause 0xb
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s2, s[0:1], 0x1
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s3, s[0:1], 0x3
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s4, s[0:1], 0x2
@@ -954,27 +952,26 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s10, s[0:1], 0x0
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s11, s[0:1], 0x4
; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s12, s[0:1], 0xa
-; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0
-; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s1, s[0:1], 0x8
+; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s13, s[0:1], 0x8
; GFX1250-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s0, s2, 8
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s3, 24
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s4, 16
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s5, 8
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s3
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, s6, 24
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s7, 16
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s8, 8
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s3, 24
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s4, 16
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s5, 8
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s1, s2
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s6, 24
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, s7, 16
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s8, 8
; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s0, s10
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s8, s9, 24
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s2, s0
-; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s12, 16
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s11
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s5, s6
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s9, 24
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s1, s0
+; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s12, 16
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s3, s11
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s5
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s6, s13
; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s5, s7, s1
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s8, s2
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s4, s3
-; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s5
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s3, s2
+; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s5, s4
; GFX1250-NOUNALIGNED-NEXT: ; return to shader part epilog
;
; GFX9-UNALIGNED-LABEL: s_load_constant_v3i32_align1:
@@ -1351,11 +1348,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align2(ptr addrspace(4) inreg
}
define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg %ptr) {
-; GFX12-LABEL: s_load_constant_v3i32_align4:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align4:
+; GFX12-UNALIGNED: ; %bb.0:
+; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-UNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align4:
+; GFX12-NOUNALIGNED: ; %bb.0:
+; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_load_constant_v3i32_align4:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_load_constant_v3i32_align4:
; GFX9: ; %bb.0:
@@ -1388,11 +1399,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg
}
define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) {
-; GFX12-LABEL: s_load_constant_i96_align8:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX12-UNALIGNED-LABEL: s_load_constant_i96_align8:
+; GFX12-UNALIGNED: ; %bb.0:
+; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-UNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX12-NOUNALIGNED-LABEL: s_load_constant_i96_align8:
+; GFX12-NOUNALIGNED: ; %bb.0:
+; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_load_constant_i96_align8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_load_constant_i96_align8:
; GFX9: ; %bb.0:
@@ -1425,11 +1450,25 @@ define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) {
}
define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg %ptr) {
-; GFX12-LABEL: s_load_constant_v3i32_align8:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align8:
+; GFX12-UNALIGNED: ; %bb.0:
+; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-UNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align8:
+; GFX12-NOUNALIGNED: ; %bb.0:
+; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_load_constant_v3i32_align8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_load_constant_v3i32_align8:
; GFX9: ; %bb.0:
@@ -1462,11 +1501,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg
}
define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg %ptr) {
-; GFX12-LABEL: s_load_constant_v6i16_align8:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: ; return to shader part epilog
+; GFX12-UNALIGNED-LABEL: s_load_constant_v6i16_align8:
+; GFX12-UNALIGNED: ; %bb.0:
+; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-UNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX12-NOUNALIGNED-LABEL: s_load_constant_v6i16_align8:
+; GFX12-NOUNALIGNED: ; %bb.0:
+; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_load_constant_v6i16_align8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_load_constant_v6i16_align8:
; GFX9: ; %bb.0:
@@ -1500,24 +1553,64 @@ define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg
}
define amdgpu_ps <12 x i8> @s_load_constant_v12i8_align8(ptr addrspace(4) inreg %ptr) {
-; GFX12-LABEL: s_load_constant_v12i8_align8:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_lshr_b32 s13, s0, 8
-; GFX12-NEXT: s_lshr_b32 s12, s0, 16
-; GFX12-NEXT: s_lshr_b32 s3, s0, 24
-; GFX12-NEXT: s_lshr_b32 s5, s1, 8
-; GFX12-NEXT: s_lshr_b32 s6, s1, 16
-; GFX12-NEXT: s_lshr_b32 s7, s1, 24
-; GFX12-NEXT: s_lshr_b32 s9, s2, 8
-; GFX12-NEXT: s_lshr_b32 s10, s2, 16
-; GFX12-NEXT: s_lshr_b32 s11, s2, 24
-; GFX12-NEXT: s_mov_b32 s4, s1
-; GFX12-NEXT: s_mov_b32 s8, s2
-; GFX12-NEXT: s_mov_b32 s1, s13
-; GFX12-NEXT: s_mov_b32 s2, s12
-; GFX12-NEXT: ; return to shader part epilog
+; GFX12-UNALIGNED-LABEL: s_load_constant_v12i8_align8:
+; GFX12-UNALIGNED: ; %bb.0:
+; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s13, s0, 8
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s12, s0, 16
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s3, s0, 24
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s5, s1, 8
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s6, s1, 16
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s7, s1, 24
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s9, s2, 8
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s10, s2, 16
+; GFX12-UNALIGNED-NEXT: s_lshr_b32 s11, s2, 24
+; GFX12-UNALIGNED-NEXT: s_mov_b32 s4, s1
+; GFX12-UNALIGNED-NEXT: s_mov_b32 s8, s2
+; GFX12-UNALIGNED-NEXT: s_mov_b32 s1, s13
+; GFX12-UNALIGNED-NEXT: s_mov_b32 s2, s12
+; GFX12-UNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX12-NOUNALIGNED-LABEL: s_load_constant_v12i8_align8:
+; GFX12-NOUNALIGNED: ; %bb.0:
+; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s13, s0, 8
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s12, s0, 16
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s3, s0, 24
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s5, s1, 8
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s6, s1, 16
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s7, s1, 24
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s9, s2, 8
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s10, s2, 16
+; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s11, s2, 24
+; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s4, s1
+; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s8, s2
+; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s1, s13
+; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s2, s12
+; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_load_constant_v12i8_align8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_lshr_b32 s13, s0, 8
+; GFX1250-NEXT: s_lshr_b32 s12, s0, 16
+; GFX1250-NEXT: s_lshr_b32 s3, s0, 24
+; GFX1250-NEXT: s_lshr_b32 s5, s1, 8
+; GFX1250-NEXT: s_lshr_b32 s6, s1, 16
+; GFX1250-NEXT: s_lshr_b32 s7, s1, 24
+; GFX1250-NEXT: s_lshr_b32 s9, s2, 8
+; GFX1250-NEXT: s_lshr_b32 s10, s2, 16
+; GFX1250-NEXT: s_lshr_b32 s11, s2, 24
+; GFX1250-NEXT: s_mov_b32 s4, s1
+; GFX1250-NEXT: s_mov_b32 s8, s2
+; GFX1250-NEXT: s_mov_b32 s1, s13
+; GFX1250-NEXT: s_mov_b32 s2, s12
+; GFX1250-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_load_constant_v12i8_align8:
; GFX9: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
index aac499f..b486fabb 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
@@ -9,15 +9,14 @@ target triple = "amdgcn-amd-amdhsa"
define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) {
; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0
-; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_lshlrev_b32 v1, 20, v0
+; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s0, -1
; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
@@ -56,13 +55,11 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspa
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1]
; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: s_endpgm
@@ -91,10 +88,9 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) {
; GFX1250-LABEL: use_flat_to_private_addrspacecast:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
; GFX1250-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
@@ -110,9 +106,8 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: s_endpgm
@@ -122,9 +117,7 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) {
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
; GFX1250-GISEL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll
new file mode 100644
index 0000000..0d68762
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 6
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes='amdgpu-attributor' %s -o - | FileCheck %s
+
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+
+;.
+; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+;.
+define amdgpu_kernel void @k0() #0 {
+; CHECK: Function Attrs: sanitize_address
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: store i8 7, ptr addrspace(3) @lds_1, align 4
+; CHECK-NEXT: ret void
+;
+ store i8 7, ptr addrspace(3) @lds_1, align 4
+ ret void
+}
+
+attributes #0 = { sanitize_address }
+; "amdgpu-no-flat-scratch-init" attribute should not be present in attribute list
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index a688b6f..fb566e5 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -707,8 +707,8 @@ attributes #6 = { "enqueued-block" }
; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR15:[0-9]+]] = { nounwind "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR19:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR20:[0-9]+]] = { "enqueued-block" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "enqueued-block" "uniform-work-group-size"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
index ef52694..54871a6 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
@@ -538,58 +538,61 @@ define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
; GFX1250-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB34_6
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_cbranch_execnz .LBB34_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB34_8
+; GFX1250-NEXT: .LBB34_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB34_3: ; %atomicrmw.check.private
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB34_3
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
-; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB34_3: ; %Flow
+; GFX1250-NEXT: .LBB34_5: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB34_5
-; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB34_7
+; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB34_5: ; %Flow1
+; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-NEXT: .LBB34_7: ; %Flow1
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB34_6: ; %Flow2
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB34_8
-; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_cbranch_execz .LBB34_2
+; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
-; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_wait_dscnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr %ptr, double %val monotonic
ret double %result
@@ -600,58 +603,61 @@ define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
; GFX1250-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB35_6
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_cbranch_execnz .LBB35_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB35_8
+; GFX1250-NEXT: .LBB35_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB35_3: ; %atomicrmw.check.private
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB35_3
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
-; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB35_3: ; %Flow
+; GFX1250-NEXT: .LBB35_5: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB35_5
-; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB35_7
+; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB35_5: ; %Flow1
+; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-NEXT: .LBB35_7: ; %Flow1
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB35_6: ; %Flow2
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB35_8
-; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_cbranch_execz .LBB35_2
+; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
-; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_wait_dscnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -686,40 +692,42 @@ define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB38_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB38_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB38_4
+; GFX1250-NEXT: .LBB38_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB38_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB38_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB38_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB38_2
+; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmin ptr %ptr, double %val monotonic
ret double %result
@@ -730,40 +738,42 @@ define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB39_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB39_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB39_4
+; GFX1250-NEXT: .LBB39_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB39_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB39_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB39_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB39_2
+; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -798,40 +808,42 @@ define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB42_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB42_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB42_4
+; GFX1250-NEXT: .LBB42_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB42_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB42_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB42_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB42_2
+; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmax ptr %ptr, double %val monotonic
ret double %result
@@ -842,40 +854,42 @@ define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB43_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB43_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB43_4
+; GFX1250-NEXT: .LBB43_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB43_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB43_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB43_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -982,13 +996,11 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1000,10 +1012,9 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB52_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1025,13 +1036,11 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1043,10 +1052,9 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB53_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1068,13 +1076,11 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB54_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1086,10 +1092,9 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB54_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1111,13 +1116,11 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB55_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1129,10 +1132,9 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB55_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1154,13 +1156,11 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB56_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1172,10 +1172,9 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB56_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1197,13 +1196,11 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB57_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1215,10 +1212,9 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB57_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1240,13 +1236,11 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1258,10 +1252,9 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB58_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1283,13 +1276,11 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1301,10 +1292,9 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB59_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 94ba5cd..6b5647e 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -569,10 +569,10 @@ define <16 x bfloat> @v_load_global_v16bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_load_b128 v[0:3], v[4:5], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[4:5], off offset:16
+; GFX1250-NEXT: global_load_b128 v[0:3], v[8:9], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[8:9], off offset:16
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <16 x bfloat>, ptr addrspace(1) %ptr
@@ -752,12 +752,12 @@ define <32 x bfloat> @v_load_global_v32bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v13, v1 :: v_dual_mov_b32 v12, v0
+; GFX1250-NEXT: v_dual_mov_b32 v17, v1 :: v_dual_mov_b32 v16, v0
; GFX1250-NEXT: s_clause 0x3
-; GFX1250-NEXT: global_load_b128 v[0:3], v[12:13], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[12:13], off offset:16
-; GFX1250-NEXT: global_load_b128 v[8:11], v[12:13], off offset:32
-; GFX1250-NEXT: global_load_b128 v[12:15], v[12:13], off offset:48
+; GFX1250-NEXT: global_load_b128 v[0:3], v[16:17], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[16:17], off offset:16
+; GFX1250-NEXT: global_load_b128 v[8:11], v[16:17], off offset:32
+; GFX1250-NEXT: global_load_b128 v[12:15], v[16:17], off offset:48
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <32 x bfloat>, ptr addrspace(1) %ptr
@@ -1055,16 +1055,16 @@ define <64 x bfloat> @v_load_global_v64bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v29, v1 :: v_dual_mov_b32 v28, v0
+; GFX1250-NEXT: v_dual_mov_b32 v33, v1 :: v_dual_mov_b32 v32, v0
; GFX1250-NEXT: s_clause 0x7
-; GFX1250-NEXT: global_load_b128 v[0:3], v[28:29], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[28:29], off offset:16
-; GFX1250-NEXT: global_load_b128 v[8:11], v[28:29], off offset:32
-; GFX1250-NEXT: global_load_b128 v[12:15], v[28:29], off offset:48
-; GFX1250-NEXT: global_load_b128 v[16:19], v[28:29], off offset:64
-; GFX1250-NEXT: global_load_b128 v[20:23], v[28:29], off offset:80
-; GFX1250-NEXT: global_load_b128 v[24:27], v[28:29], off offset:96
-; GFX1250-NEXT: global_load_b128 v[28:31], v[28:29], off offset:112
+; GFX1250-NEXT: global_load_b128 v[0:3], v[32:33], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[32:33], off offset:16
+; GFX1250-NEXT: global_load_b128 v[8:11], v[32:33], off offset:32
+; GFX1250-NEXT: global_load_b128 v[12:15], v[32:33], off offset:48
+; GFX1250-NEXT: global_load_b128 v[16:19], v[32:33], off offset:64
+; GFX1250-NEXT: global_load_b128 v[20:23], v[32:33], off offset:80
+; GFX1250-NEXT: global_load_b128 v[24:27], v[32:33], off offset:96
+; GFX1250-NEXT: global_load_b128 v[28:31], v[32:33], off offset:112
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <64 x bfloat>, ptr addrspace(1) %ptr
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
index 243f0ed..f8655a7 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
@@ -256,7 +256,6 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: s_wait_storecnt 0x0
; GCN-NEXT: .LBB5_3: ; %bb4
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GCN-NEXT: s_wait_xcnt 0x0
; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 63
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: global_store_b32 v0, v1, s[0:1] scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
index ddd3b152..363a248 100644
--- a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
+++ b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
@@ -2700,142 +2700,142 @@ define amdgpu_kernel void @amd_kernel_v32i8(<32 x i8> %arg0) {
;
; GFX1250-LABEL: amd_kernel_v32i8:
; GFX1250: ; %bb.0: ; %entry
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b64_e32 v[8:9], 16
; GFX1250-NEXT: v_mov_b64_e32 v[10:11], 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_lshr_b32 s16, s0, 16
-; GFX1250-NEXT: s_lshr_b32 s17, s0, 24
-; GFX1250-NEXT: s_lshr_b32 s20, s2, 16
-; GFX1250-NEXT: s_lshr_b32 s21, s2, 24
-; GFX1250-NEXT: s_lshr_b32 s14, s7, 16
-; GFX1250-NEXT: s_lshr_b32 s15, s7, 24
-; GFX1250-NEXT: s_bfe_u32 s27, s7, 0x80008
+; GFX1250-NEXT: s_lshr_b32 s16, s8, 16
+; GFX1250-NEXT: s_lshr_b32 s17, s8, 24
+; GFX1250-NEXT: s_lshr_b32 s6, s15, 16
+; GFX1250-NEXT: s_lshr_b32 s7, s15, 24
+; GFX1250-NEXT: s_bfe_u32 s27, s15, 0x80008
; GFX1250-NEXT: s_add_co_i32 s17, s17, s17
; GFX1250-NEXT: s_add_co_i32 s16, s16, s16
-; GFX1250-NEXT: s_lshr_b32 s18, s1, 16
-; GFX1250-NEXT: s_lshr_b32 s19, s1, 24
-; GFX1250-NEXT: s_lshr_b32 s22, s3, 16
-; GFX1250-NEXT: s_lshr_b32 s23, s3, 24
-; GFX1250-NEXT: s_bfe_u32 s29, s1, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s30, s3, 0x80008
-; GFX1250-NEXT: s_add_co_i32 s21, s21, s21
-; GFX1250-NEXT: s_add_co_i32 s20, s20, s20
; GFX1250-NEXT: s_lshl_b32 s17, s17, 8
; GFX1250-NEXT: s_and_b32 s16, s16, 0xff
-; GFX1250-NEXT: s_add_co_i32 s7, s7, s7
-; GFX1250-NEXT: s_add_co_i32 s27, s27, s27
; GFX1250-NEXT: s_add_co_i32 s15, s15, s15
-; GFX1250-NEXT: s_add_co_i32 s14, s14, s14
-; GFX1250-NEXT: s_add_co_i32 s3, s3, s3
+; GFX1250-NEXT: s_add_co_i32 s27, s27, s27
+; GFX1250-NEXT: s_add_co_i32 s7, s7, s7
+; GFX1250-NEXT: s_add_co_i32 s6, s6, s6
+; GFX1250-NEXT: s_or_b32 s16, s16, s17
+; GFX1250-NEXT: s_and_b32 s15, s15, 0xff
+; GFX1250-NEXT: s_lshl_b32 s17, s27, 8
+; GFX1250-NEXT: s_lshl_b32 s7, s7, 8
+; GFX1250-NEXT: s_and_b32 s6, s6, 0xff
+; GFX1250-NEXT: s_or_b32 s15, s15, s17
+; GFX1250-NEXT: s_or_b32 s6, s6, s7
+; GFX1250-NEXT: s_bfe_u32 s26, s14, 0x80008
+; GFX1250-NEXT: s_and_b32 s7, s15, 0xffff
+; GFX1250-NEXT: s_lshl_b32 s6, s6, 16
+; GFX1250-NEXT: s_lshr_b32 s20, s10, 16
+; GFX1250-NEXT: s_lshr_b32 s21, s10, 24
+; GFX1250-NEXT: s_lshr_b32 s4, s14, 16
+; GFX1250-NEXT: s_lshr_b32 s5, s14, 24
+; GFX1250-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-NEXT: s_add_co_i32 s7, s14, s14
+; GFX1250-NEXT: s_add_co_i32 s26, s26, s26
+; GFX1250-NEXT: s_lshr_b32 s18, s9, 16
+; GFX1250-NEXT: s_lshr_b32 s19, s9, 24
+; GFX1250-NEXT: s_lshr_b32 s22, s11, 16
+; GFX1250-NEXT: s_lshr_b32 s23, s11, 24
+; GFX1250-NEXT: s_bfe_u32 s29, s9, 0x80008
+; GFX1250-NEXT: s_bfe_u32 s30, s11, 0x80008
+; GFX1250-NEXT: s_add_co_i32 s21, s21, s21
+; GFX1250-NEXT: s_add_co_i32 s20, s20, s20
+; GFX1250-NEXT: s_lshr_b32 s2, s13, 16
+; GFX1250-NEXT: s_lshr_b32 s3, s13, 24
+; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1250-NEXT: s_lshl_b32 s14, s26, 8
+; GFX1250-NEXT: s_add_co_i32 s5, s5, s5
+; GFX1250-NEXT: s_add_co_i32 s4, s4, s4
+; GFX1250-NEXT: s_add_co_i32 s11, s11, s11
; GFX1250-NEXT: s_add_co_i32 s30, s30, s30
; GFX1250-NEXT: s_add_co_i32 s23, s23, s23
; GFX1250-NEXT: s_add_co_i32 s22, s22, s22
; GFX1250-NEXT: s_lshl_b32 s21, s21, 8
; GFX1250-NEXT: s_and_b32 s20, s20, 0xff
-; GFX1250-NEXT: s_add_co_i32 s1, s1, s1
+; GFX1250-NEXT: s_add_co_i32 s9, s9, s9
; GFX1250-NEXT: s_add_co_i32 s29, s29, s29
; GFX1250-NEXT: s_add_co_i32 s19, s19, s19
; GFX1250-NEXT: s_add_co_i32 s18, s18, s18
-; GFX1250-NEXT: s_lshr_b32 s10, s5, 16
-; GFX1250-NEXT: s_lshr_b32 s11, s5, 24
-; GFX1250-NEXT: s_lshr_b32 s12, s6, 16
-; GFX1250-NEXT: s_lshr_b32 s13, s6, 24
-; GFX1250-NEXT: s_or_b32 s16, s16, s17
-; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
-; GFX1250-NEXT: s_lshl_b32 s17, s27, 8
-; GFX1250-NEXT: s_lshl_b32 s15, s15, 8
-; GFX1250-NEXT: s_and_b32 s14, s14, 0xff
-; GFX1250-NEXT: s_and_b32 s3, s3, 0xff
+; GFX1250-NEXT: s_bfe_u32 s25, s13, 0x80008
+; GFX1250-NEXT: s_lshl_b32 s5, s5, 8
+; GFX1250-NEXT: s_and_b32 s4, s4, 0xff
+; GFX1250-NEXT: s_or_b32 s7, s7, s14
+; GFX1250-NEXT: s_add_co_i32 s3, s3, s3
+; GFX1250-NEXT: s_add_co_i32 s2, s2, s2
+; GFX1250-NEXT: s_and_b32 s11, s11, 0xff
; GFX1250-NEXT: s_lshl_b32 s30, s30, 8
; GFX1250-NEXT: s_lshl_b32 s23, s23, 8
; GFX1250-NEXT: s_and_b32 s22, s22, 0xff
; GFX1250-NEXT: s_or_b32 s20, s20, s21
-; GFX1250-NEXT: s_and_b32 s1, s1, 0xff
+; GFX1250-NEXT: s_and_b32 s9, s9, 0xff
; GFX1250-NEXT: s_lshl_b32 s21, s29, 8
; GFX1250-NEXT: s_lshl_b32 s19, s19, 8
; GFX1250-NEXT: s_and_b32 s18, s18, 0xff
-; GFX1250-NEXT: s_lshr_b32 s8, s4, 16
-; GFX1250-NEXT: s_lshr_b32 s9, s4, 24
-; GFX1250-NEXT: s_bfe_u32 s24, s4, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s25, s5, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s26, s6, 0x80008
-; GFX1250-NEXT: s_or_b32 s7, s7, s17
-; GFX1250-NEXT: s_or_b32 s14, s14, s15
-; GFX1250-NEXT: s_add_co_i32 s13, s13, s13
-; GFX1250-NEXT: s_add_co_i32 s12, s12, s12
-; GFX1250-NEXT: s_add_co_i32 s11, s11, s11
-; GFX1250-NEXT: s_add_co_i32 s10, s10, s10
-; GFX1250-NEXT: s_bfe_u32 s28, s0, 0x80008
-; GFX1250-NEXT: s_or_b32 s3, s3, s30
+; GFX1250-NEXT: s_lshr_b32 s0, s12, 16
+; GFX1250-NEXT: s_lshr_b32 s1, s12, 24
+; GFX1250-NEXT: s_bfe_u32 s24, s12, 0x80008
+; GFX1250-NEXT: s_or_b32 s4, s4, s5
+; GFX1250-NEXT: s_and_b32 s5, s7, 0xffff
+; GFX1250-NEXT: s_add_co_i32 s7, s13, s13
+; GFX1250-NEXT: s_add_co_i32 s25, s25, s25
+; GFX1250-NEXT: s_lshl_b32 s3, s3, 8
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xff
+; GFX1250-NEXT: s_bfe_u32 s28, s8, 0x80008
+; GFX1250-NEXT: s_or_b32 s11, s11, s30
; GFX1250-NEXT: s_or_b32 s22, s22, s23
-; GFX1250-NEXT: s_bfe_u32 s23, s2, 0x80008
-; GFX1250-NEXT: s_or_b32 s1, s1, s21
+; GFX1250-NEXT: s_bfe_u32 s23, s10, 0x80008
+; GFX1250-NEXT: s_or_b32 s9, s9, s21
; GFX1250-NEXT: s_or_b32 s18, s18, s19
-; GFX1250-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s14, s14, 16
-; GFX1250-NEXT: s_add_co_i32 s6, s6, s6
-; GFX1250-NEXT: s_add_co_i32 s26, s26, s26
-; GFX1250-NEXT: s_lshl_b32 s13, s13, 8
-; GFX1250-NEXT: s_and_b32 s12, s12, 0xff
-; GFX1250-NEXT: s_add_co_i32 s5, s5, s5
-; GFX1250-NEXT: s_add_co_i32 s25, s25, s25
-; GFX1250-NEXT: s_lshl_b32 s11, s11, 8
-; GFX1250-NEXT: s_and_b32 s10, s10, 0xff
-; GFX1250-NEXT: s_add_co_i32 s4, s4, s4
+; GFX1250-NEXT: s_lshl_b32 s4, s4, 16
+; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1250-NEXT: s_lshl_b32 s13, s25, 8
+; GFX1250-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-NEXT: s_add_co_i32 s3, s12, s12
; GFX1250-NEXT: s_add_co_i32 s24, s24, s24
-; GFX1250-NEXT: s_add_co_i32 s9, s9, s9
-; GFX1250-NEXT: s_add_co_i32 s8, s8, s8
-; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX1250-NEXT: s_add_co_i32 s1, s1, s1
+; GFX1250-NEXT: s_add_co_i32 s0, s0, s0
+; GFX1250-NEXT: s_and_b32 s11, s11, 0xffff
; GFX1250-NEXT: s_lshl_b32 s22, s22, 16
-; GFX1250-NEXT: s_add_co_i32 s2, s2, s2
+; GFX1250-NEXT: s_add_co_i32 s10, s10, s10
; GFX1250-NEXT: s_add_co_i32 s23, s23, s23
-; GFX1250-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX1250-NEXT: s_and_b32 s9, s9, 0xffff
; GFX1250-NEXT: s_lshl_b32 s18, s18, 16
-; GFX1250-NEXT: s_add_co_i32 s0, s0, s0
+; GFX1250-NEXT: s_add_co_i32 s8, s8, s8
; GFX1250-NEXT: s_add_co_i32 s28, s28, s28
-; GFX1250-NEXT: s_or_b32 s7, s7, s14
-; GFX1250-NEXT: s_and_b32 s6, s6, 0xff
-; GFX1250-NEXT: s_lshl_b32 s14, s26, 8
-; GFX1250-NEXT: s_or_b32 s12, s12, s13
-; GFX1250-NEXT: s_and_b32 s5, s5, 0xff
-; GFX1250-NEXT: s_lshl_b32 s13, s25, 8
-; GFX1250-NEXT: s_or_b32 s10, s10, s11
-; GFX1250-NEXT: s_and_b32 s4, s4, 0xff
-; GFX1250-NEXT: s_lshl_b32 s11, s24, 8
-; GFX1250-NEXT: s_lshl_b32 s9, s9, 8
-; GFX1250-NEXT: s_and_b32 s8, s8, 0xff
-; GFX1250-NEXT: s_or_b32 s3, s3, s22
-; GFX1250-NEXT: s_and_b32 s2, s2, 0xff
-; GFX1250-NEXT: s_lshl_b32 s22, s23, 8
-; GFX1250-NEXT: s_or_b32 s1, s1, s18
+; GFX1250-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-NEXT: s_or_b32 s5, s7, s13
+; GFX1250-NEXT: s_and_b32 s3, s3, 0xff
+; GFX1250-NEXT: s_lshl_b32 s7, s24, 8
+; GFX1250-NEXT: s_lshl_b32 s1, s1, 8
; GFX1250-NEXT: s_and_b32 s0, s0, 0xff
+; GFX1250-NEXT: s_or_b32 s11, s11, s22
+; GFX1250-NEXT: s_and_b32 s10, s10, 0xff
+; GFX1250-NEXT: s_lshl_b32 s22, s23, 8
+; GFX1250-NEXT: s_or_b32 s9, s9, s18
+; GFX1250-NEXT: s_and_b32 s8, s8, 0xff
; GFX1250-NEXT: s_lshl_b32 s18, s28, 8
-; GFX1250-NEXT: s_or_b32 s6, s6, s14
-; GFX1250-NEXT: s_or_b32 s5, s5, s13
-; GFX1250-NEXT: s_or_b32 s4, s4, s11
-; GFX1250-NEXT: s_or_b32 s8, s8, s9
-; GFX1250-NEXT: s_or_b32 s2, s2, s22
-; GFX1250-NEXT: s_or_b32 s0, s0, s18
-; GFX1250-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s12, s12, 16
+; GFX1250-NEXT: s_or_b32 s3, s3, s7
+; GFX1250-NEXT: s_or_b32 s0, s0, s1
+; GFX1250-NEXT: s_or_b32 s10, s10, s22
+; GFX1250-NEXT: s_or_b32 s8, s8, s18
; GFX1250-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX1250-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s8, s8, 16
-; GFX1250-NEXT: s_lshl_b32 s9, s10, 16
-; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX1250-NEXT: s_and_b32 s1, s3, 0xffff
+; GFX1250-NEXT: s_lshl_b32 s0, s0, 16
+; GFX1250-NEXT: s_lshl_b32 s2, s2, 16
+; GFX1250-NEXT: s_and_b32 s10, s10, 0xffff
; GFX1250-NEXT: s_lshl_b32 s20, s20, 16
-; GFX1250-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX1250-NEXT: s_and_b32 s8, s8, 0xffff
; GFX1250-NEXT: s_lshl_b32 s16, s16, 16
-; GFX1250-NEXT: s_or_b32 s6, s6, s12
-; GFX1250-NEXT: s_or_b32 s4, s4, s8
-; GFX1250-NEXT: s_or_b32 s5, s5, s9
-; GFX1250-NEXT: s_or_b32 s2, s2, s20
-; GFX1250-NEXT: s_or_b32 s0, s0, s16
-; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX1250-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
-; GFX1250-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1250-NEXT: v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s3
+; GFX1250-NEXT: s_or_b32 s0, s1, s0
+; GFX1250-NEXT: s_or_b32 s1, s5, s2
+; GFX1250-NEXT: s_or_b32 s10, s10, s20
+; GFX1250-NEXT: s_or_b32 s8, s8, s16
+; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s6
+; GFX1250-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GFX1250-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b128 v[8:9], v[0:3], off
; GFX1250-NEXT: global_store_b128 v[10:11], v[4:7], off
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 51652a0..2ae6fc2 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -117,12 +117,12 @@ define amdgpu_kernel void @sadd64rr(ptr addrspace(1) %out, i64 %a, i64 %b) {
;
; GFX1250-LABEL: sadd64rr:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -818,17 +818,17 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
;
; GFX1250-LABEL: suaddo64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[12:13], s[14:15]
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11]
; GFX1250-NEXT: s_endpgm
%uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %uadd, 0
@@ -1096,12 +1096,12 @@ define amdgpu_kernel void @ssub64rr(ptr addrspace(1) %out, i64 %a, i64 %b) {
;
; GFX1250-LABEL: ssub64rr:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1798,17 +1798,17 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
;
; GFX1250-LABEL: susubo64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT: s_sub_nc_u64 s[0:1], s[12:13], s[14:15]
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11]
; GFX1250-NEXT: s_endpgm
%usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %usub, 0
@@ -3099,70 +3099,70 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
;
; GFX1250-LABEL: sudiv64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
+; GFX1250-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_b64 s[6:7], s[6:7], 0xffffffff00000000
-; GFX1250-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1250-NEXT: s_and_b64 s[4:5], s[4:5], 0xffffffff00000000
+; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_cvt_f32_u32 s6, s4
-; GFX1250-NEXT: s_cvt_f32_u32 s7, s5
-; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[4:5]
+; GFX1250-NEXT: s_cvt_f32_u32 s4, s6
+; GFX1250-NEXT: s_cvt_f32_u32 s5, s7
+; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_fmac_f32 s6, s7, 0x4f800000
-; GFX1250-NEXT: v_s_rcp_f32 s6, s6
+; GFX1250-NEXT: s_fmac_f32 s4, s5, 0x4f800000
+; GFX1250-NEXT: v_s_rcp_f32 s4, s4
; GFX1250-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_mul_f32 s6, s6, 0x5f7ffffc
-; GFX1250-NEXT: s_mul_f32 s7, s6, 0x2f800000
+; GFX1250-NEXT: s_mul_f32 s4, s4, 0x5f7ffffc
+; GFX1250-NEXT: s_mul_f32 s5, s4, 0x2f800000
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_trunc_f32 s7, s7
-; GFX1250-NEXT: s_fmac_f32 s6, s7, 0xcf800000
-; GFX1250-NEXT: s_cvt_u32_f32 s9, s7
-; GFX1250-NEXT: s_mov_b32 s7, 0
+; GFX1250-NEXT: s_trunc_f32 s5, s5
+; GFX1250-NEXT: s_fmac_f32 s4, s5, 0xcf800000
+; GFX1250-NEXT: s_cvt_u32_f32 s9, s5
+; GFX1250-NEXT: s_mov_b32 s5, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_cvt_u32_f32 s8, s6
+; GFX1250-NEXT: s_cvt_u32_f32 s8, s4
; GFX1250-NEXT: s_mul_u64 s[12:13], s[10:11], s[8:9]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s15, s8, s13
; GFX1250-NEXT: s_mul_i32 s14, s8, s13
-; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s12
+; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s12
; GFX1250-NEXT: s_mul_i32 s17, s9, s12
-; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[6:7], s[14:15]
+; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[4:5], s[14:15]
; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s12
; GFX1250-NEXT: s_mul_hi_u32 s18, s9, s13
-; GFX1250-NEXT: s_add_co_u32 s6, s14, s17
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s15, s16
+; GFX1250-NEXT: s_add_co_u32 s4, s14, s17
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s15, s16
; GFX1250-NEXT: s_mul_i32 s12, s9, s13
; GFX1250-NEXT: s_add_co_ci_u32 s13, s18, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s12
-; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT: s_cselect_b32 s4, -1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s4, 0
; GFX1250-NEXT: s_add_co_ci_u32 s9, s9, s13
; GFX1250-NEXT: s_mul_u64 s[10:11], s[10:11], s[8:9]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s13, s8, s11
; GFX1250-NEXT: s_mul_i32 s12, s8, s11
-; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s10
+; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s10
; GFX1250-NEXT: s_mul_i32 s15, s9, s10
-; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13]
; GFX1250-NEXT: s_mul_hi_u32 s14, s9, s10
; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s11
-; GFX1250-NEXT: s_add_co_u32 s6, s12, s15
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s13, s14
+; GFX1250-NEXT: s_add_co_u32 s4, s12, s15
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s13, s14
; GFX1250-NEXT: s_mul_i32 s10, s9, s11
; GFX1250-NEXT: s_add_co_ci_u32 s11, s16, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[6:7], s[10:11]
+; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[4:5], s[10:11]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s10
; GFX1250-NEXT: s_cselect_b32 s10, -1, 0
-; GFX1250-NEXT: s_mul_hi_u32 s6, s2, s8
+; GFX1250-NEXT: s_mul_hi_u32 s4, s2, s8
; GFX1250-NEXT: s_cmp_lg_u32 s10, 0
; GFX1250-NEXT: s_mul_hi_u32 s12, s3, s8
; GFX1250-NEXT: s_add_co_ci_u32 s10, s9, s11
@@ -3170,33 +3170,33 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_mul_hi_u32 s9, s2, s10
; GFX1250-NEXT: s_mul_i32 s8, s2, s10
; GFX1250-NEXT: s_mul_hi_u32 s13, s3, s10
-; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[8:9]
+; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[8:9]
; GFX1250-NEXT: s_mul_i32 s10, s3, s10
-; GFX1250-NEXT: s_add_co_u32 s6, s8, s11
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s9, s12
+; GFX1250-NEXT: s_add_co_u32 s4, s8, s11
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s9, s12
; GFX1250-NEXT: s_add_co_ci_u32 s11, s13, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[10:11]
+; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[10:11]
; GFX1250-NEXT: s_and_b64 s[10:11], s[8:9], 0xffffffff00000000
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_or_b32 s10, s10, s8
-; GFX1250-NEXT: s_mul_u64 s[8:9], s[4:5], s[10:11]
+; GFX1250-NEXT: s_mul_u64 s[8:9], s[6:7], s[10:11]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_sub_co_u32 s6, s2, s8
+; GFX1250-NEXT: s_sub_co_u32 s4, s2, s8
; GFX1250-NEXT: s_cselect_b32 s8, -1, 0
; GFX1250-NEXT: s_sub_co_i32 s12, s3, s9
; GFX1250-NEXT: s_cmp_lg_u32 s8, 0
-; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s5
-; GFX1250-NEXT: s_sub_co_u32 s13, s6, s4
+; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s7
+; GFX1250-NEXT: s_sub_co_u32 s13, s4, s6
; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s14, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s12, s5
+; GFX1250-NEXT: s_cmp_ge_u32 s12, s7
; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s13, s4
+; GFX1250-NEXT: s_cmp_ge_u32 s13, s6
; GFX1250-NEXT: s_cselect_b32 s15, -1, 0
-; GFX1250-NEXT: s_cmp_eq_u32 s12, s5
+; GFX1250-NEXT: s_cmp_eq_u32 s12, s7
; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[10:11], 1
; GFX1250-NEXT: s_cselect_b32 s16, s15, s14
; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[10:11], 2
@@ -3206,20 +3206,20 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_cmp_lg_u32 s8, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s3, s3, s9
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_ge_u32 s3, s5
+; GFX1250-NEXT: s_cmp_ge_u32 s3, s7
; GFX1250-NEXT: s_cselect_b32 s8, -1, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s6, s4
-; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
-; GFX1250-NEXT: s_cmp_eq_u32 s3, s5
-; GFX1250-NEXT: s_cselect_b32 s3, s6, s8
+; GFX1250-NEXT: s_cmp_ge_u32 s4, s6
+; GFX1250-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT: s_cmp_eq_u32 s3, s7
+; GFX1250-NEXT: s_cselect_b32 s3, s4, s8
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-NEXT: s_cselect_b32 s9, s13, s11
; GFX1250-NEXT: s_cselect_b32 s8, s12, s10
; GFX1250-NEXT: s_cbranch_execnz .LBB16_3
; GFX1250-NEXT: .LBB16_2:
-; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s4
-; GFX1250-NEXT: s_sub_co_i32 s5, 0, s4
+; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX1250-NEXT: s_sub_co_i32 s4, 0, s6
; GFX1250-NEXT: s_mov_b32 s9, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
; GFX1250-NEXT: v_rcp_iflag_f32_e32 v0, v0
@@ -3228,23 +3228,23 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX1250-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1250-NEXT: s_mul_i32 s5, s5, s3
+; GFX1250-NEXT: s_mul_i32 s4, s4, s3
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_mul_hi_u32 s5, s3, s5
-; GFX1250-NEXT: s_add_co_i32 s3, s3, s5
+; GFX1250-NEXT: s_mul_hi_u32 s4, s3, s4
+; GFX1250-NEXT: s_add_co_i32 s3, s3, s4
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s3, s2, s3
-; GFX1250-NEXT: s_mul_i32 s5, s3, s4
+; GFX1250-NEXT: s_mul_i32 s4, s3, s6
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_sub_co_i32 s2, s2, s5
-; GFX1250-NEXT: s_add_co_i32 s5, s3, 1
-; GFX1250-NEXT: s_sub_co_i32 s6, s2, s4
-; GFX1250-NEXT: s_cmp_ge_u32 s2, s4
-; GFX1250-NEXT: s_cselect_b32 s3, s5, s3
-; GFX1250-NEXT: s_cselect_b32 s2, s6, s2
-; GFX1250-NEXT: s_add_co_i32 s5, s3, 1
-; GFX1250-NEXT: s_cmp_ge_u32 s2, s4
-; GFX1250-NEXT: s_cselect_b32 s8, s5, s3
+; GFX1250-NEXT: s_sub_co_i32 s2, s2, s4
+; GFX1250-NEXT: s_add_co_i32 s4, s3, 1
+; GFX1250-NEXT: s_sub_co_i32 s5, s2, s6
+; GFX1250-NEXT: s_cmp_ge_u32 s2, s6
+; GFX1250-NEXT: s_cselect_b32 s3, s4, s3
+; GFX1250-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-NEXT: s_add_co_i32 s4, s3, 1
+; GFX1250-NEXT: s_cmp_ge_u32 s2, s6
+; GFX1250-NEXT: s_cselect_b32 s8, s4, s3
; GFX1250-NEXT: .LBB16_3:
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
index 029aa39..c475efb 100644
--- a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
@@ -128,13 +128,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0
%2.sub2_sub3:areg_128 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -153,13 +153,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0
%2.sub2_sub3:areg_128_align2 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -398,14 +398,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0
%1.sub1:areg_128 = COPY %0
%1.sub2:areg_128 = COPY %0
%1.sub3:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -425,14 +425,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0
%1.sub1:areg_128_align2 = COPY %0
%1.sub2:areg_128_align2 = COPY %0
%1.sub3:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -504,7 +504,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_64 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0
; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VS_64_with_sub1 */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, [[COPY]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_64 = COPY %0
@@ -512,7 +512,7 @@ body: |
undef %2.sub0:vreg_64 = COPY %0
%2.sub1:vreg_64 = COPY %0
INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, killed %1
- INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VReg_64 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, killed %2
SI_RETURN
...
@@ -641,13 +641,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 =COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -668,13 +668,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub1
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_128 =COPY $vgpr0_vgpr1
%0.sub1:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0
%2.sub2_sub3:areg_128_align2 = COPY %0.sub1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -890,14 +890,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0.sub0
%1.sub1:areg_128 = COPY %0.sub0
%1.sub2:areg_128 = COPY %0.sub0
%1.sub3:areg_128 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -917,14 +917,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0.sub0
%1.sub1:areg_128_align2 = COPY %0.sub0
%1.sub2:areg_128_align2 = COPY %0.sub0
%1.sub3:areg_128_align2 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -1051,13 +1051,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1076,13 +1076,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128_align2 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128_align2 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -1358,11 +1358,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1379,11 +1379,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index be60a00..0cae0e5 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -705,12 +705,13 @@ define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(ptr addrspace(1) %C
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scale_offset
-; GFX1250-NEXT: global_load_b32 v0, v0, s[2:3] scale_offset
-; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s5
; GFX1250-NEXT: s_wait_loadcnt 0x1
-; GFX1250-NEXT: ds_store_b32 v2, v1 offset:32
+; GFX1250-NEXT: ds_store_b32 v0, v1 offset:32
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_store_b32 v3, v0 offset:32
+; GFX1250-NEXT: ds_store_b32 v3, v2 offset:32
; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
@@ -1282,14 +1283,14 @@ define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(ptr addrspace(3)
;
; GFX1250-LABEL: simple_write2_v4f32_superreg_align4:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x8
+; GFX1250-NEXT: s_load_b32 s8, s[4:5], 0x0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[6:7], 0x0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s4
+; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s8
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
; GFX1250-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v4, s1
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 1e7855c..eefc781 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -541,11 +541,10 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -570,9 +569,8 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: s_clause 0x1
@@ -586,14 +584,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -618,10 +615,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -727,13 +723,12 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -754,9 +749,8 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: s_clause 0x1
@@ -770,8 +764,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -780,7 +773,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -805,10 +798,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -917,11 +909,10 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -943,9 +934,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
@@ -953,15 +943,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3
@@ -982,10 +971,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -1069,11 +1057,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3
@@ -1094,9 +1080,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
@@ -1104,8 +1089,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1115,7 +1099,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3
@@ -1136,10 +1120,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -1400,11 +1383,10 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -1429,9 +1411,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -1445,14 +1426,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -1477,10 +1457,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -1590,13 +1569,12 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1617,9 +1595,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2
; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -1633,8 +1610,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -1643,7 +1619,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -1668,10 +1644,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2
; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -1784,11 +1759,10 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -1810,9 +1784,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2
; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -1823,15 +1796,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3
@@ -1852,10 +1824,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2
; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -1950,11 +1921,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3
@@ -1975,9 +1944,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2
; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -1988,8 +1956,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1999,7 +1966,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3
@@ -2020,10 +1987,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2
; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -2295,11 +2261,10 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -2324,9 +2289,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2
; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -2340,14 +2304,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -2372,10 +2335,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2
; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -2487,13 +2449,12 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2514,9 +2475,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2
; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -2530,8 +2490,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -2540,7 +2499,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -2565,10 +2524,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2
; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -2683,11 +2641,10 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -2709,9 +2666,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2
; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -2722,15 +2678,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3
@@ -2751,10 +2706,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2
; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -2851,11 +2805,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3
@@ -2876,9 +2828,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2
; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -2889,8 +2840,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -2900,7 +2850,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3
@@ -2921,10 +2871,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2
; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -3198,11 +3147,10 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -3227,9 +3175,8 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2
; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -3244,14 +3191,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -3276,10 +3222,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2
; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -3390,13 +3335,12 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3417,9 +3361,8 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2
; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -3434,8 +3377,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -3444,7 +3386,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -3469,10 +3411,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2
; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -3586,11 +3527,10 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -3612,9 +3552,8 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2
; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -3626,15 +3565,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3
@@ -3655,10 +3593,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2
; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -3754,11 +3691,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3
@@ -3779,9 +3714,8 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2
; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -3793,8 +3727,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -3804,7 +3737,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3
@@ -3825,10 +3758,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2
; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -4101,11 +4033,10 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -4130,9 +4061,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2
; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -4147,14 +4077,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -4179,10 +4108,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2
; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -4293,13 +4221,12 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4320,9 +4247,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2
; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -4337,8 +4263,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -4347,7 +4272,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -4372,10 +4297,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2
; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -4489,11 +4413,10 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -4515,9 +4438,8 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2
; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -4529,15 +4451,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3
@@ -4558,10 +4479,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2
; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -4657,11 +4577,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3
@@ -4682,9 +4600,8 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2
; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -4696,8 +4613,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -4707,7 +4623,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3
@@ -4728,10 +4644,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2
; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -5004,11 +4919,10 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -5033,9 +4947,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2
; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -5050,14 +4963,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -5082,10 +4994,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2
; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -5196,13 +5107,12 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5223,9 +5133,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -5240,8 +5149,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -5250,7 +5158,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -5275,10 +5183,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -5392,11 +5299,10 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -5418,9 +5324,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -5432,15 +5337,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3
@@ -5461,10 +5365,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -5560,11 +5463,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3
@@ -5585,9 +5486,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -5599,8 +5499,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -5610,7 +5509,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3
@@ -5631,10 +5530,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -5877,11 +5775,10 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -5902,9 +5799,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -5918,14 +5814,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -5946,10 +5841,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -6061,13 +5955,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -6084,9 +5977,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -6100,8 +5992,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -6110,7 +6001,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -6131,10 +6022,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -6249,11 +6139,10 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -6272,9 +6161,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2
; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -6285,15 +6173,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3
@@ -6311,10 +6198,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2
; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -6409,11 +6295,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3
@@ -6431,9 +6315,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2
; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -6444,8 +6327,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -6455,7 +6337,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3
@@ -6473,10 +6355,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2
; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -6718,11 +6599,10 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -6743,9 +6623,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2
; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -6759,14 +6638,13 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -6787,10 +6665,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2
; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -6902,13 +6779,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -6925,9 +6801,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2
; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -6941,8 +6816,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -6951,7 +6825,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -6972,10 +6846,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2
; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -7090,11 +6963,10 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -7113,9 +6985,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2
; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -7126,15 +6997,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3
@@ -7152,10 +7022,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2
; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -7250,11 +7119,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3
@@ -7272,9 +7139,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2
; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -7285,8 +7151,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -7296,7 +7161,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3
@@ -7314,10 +7179,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2
; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -7559,11 +7423,10 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -7584,9 +7447,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2
; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -7600,14 +7462,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -7628,10 +7489,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2
; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -7743,13 +7603,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -7766,9 +7625,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2
; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -7782,8 +7640,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -7792,7 +7649,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -7813,10 +7670,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2
; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -7931,11 +7787,10 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -7954,9 +7809,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2
; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -7967,15 +7821,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3
@@ -7993,10 +7846,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2
; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -8091,11 +7943,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3
@@ -8113,9 +7963,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2
; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -8126,8 +7975,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -8137,7 +7985,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3
@@ -8155,10 +8003,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2
; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -8400,11 +8247,10 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -8425,9 +8271,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2
; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -8441,14 +8286,13 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -8469,10 +8313,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2
; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -8584,13 +8427,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -8607,9 +8449,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2
; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -8623,8 +8464,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -8633,7 +8473,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -8654,10 +8494,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2
; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -8772,11 +8611,10 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -8795,9 +8633,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2
; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -8808,15 +8645,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3
@@ -8834,10 +8670,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2
; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -8932,11 +8767,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3
@@ -8954,9 +8787,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2
; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -8967,8 +8799,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -8978,7 +8809,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3
@@ -8996,10 +8827,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2
; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -9281,12 +9111,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -9311,9 +9140,8 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2
; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
@@ -9328,15 +9156,14 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -9361,10 +9188,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2
; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -9485,13 +9311,12 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -9512,9 +9337,8 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2
; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
@@ -9529,10 +9353,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -9540,7 +9363,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -9565,10 +9388,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2
; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -9690,13 +9512,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -9718,9 +9539,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2
; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -9732,15 +9552,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3
@@ -9761,10 +9581,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2
; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -9869,11 +9688,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3
@@ -9894,9 +9711,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2
; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -9908,18 +9724,18 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3
@@ -9940,10 +9756,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2
; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -10188,11 +10003,10 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -10214,10 +10028,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2
; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -10233,14 +10046,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -10262,18 +10074,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2
; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -10386,13 +10197,12 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -10410,10 +10220,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2
; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -10429,8 +10238,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -10439,7 +10247,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -10461,18 +10269,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2
; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -10588,11 +10395,10 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -10610,9 +10416,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2
; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -10625,15 +10430,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3
@@ -10650,17 +10454,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2
; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -10754,11 +10557,9 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3
@@ -10775,9 +10576,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2
; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -10790,8 +10590,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -10801,7 +10600,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3
@@ -10818,17 +10617,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2
; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -11064,11 +10862,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -11090,10 +10887,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2
; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -11112,14 +10908,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -11141,11 +10936,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2
; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -11268,13 +11062,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -11292,10 +11085,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2
; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
@@ -11314,8 +11106,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -11324,7 +11115,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -11346,11 +11137,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2
; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -11476,11 +11266,10 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -11498,9 +11287,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2
; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -11516,15 +11304,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3
@@ -11541,10 +11328,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2
; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -11648,11 +11434,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3
@@ -11669,9 +11453,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2
; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -11687,8 +11470,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -11698,7 +11480,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3
@@ -11715,10 +11497,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2
; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -11834,12 +11615,10 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_4
; GFX1250-SDAG-NEXT: ; %bb.2: ; %atomicrmw.global
@@ -11855,9 +11634,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: .LBB110_4:
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-SDAG-NEXT: .LBB110_5: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2
@@ -11891,10 +11668,9 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_6
; GFX1250-GISEL-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_3
@@ -11910,9 +11686,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB110_5
; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2
@@ -12060,12 +11834,10 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: .LBB111_2: ; %atomicrmw.phi
; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
; GFX1250-SDAG-NEXT: .LBB111_3: ; %atomicrmw.check.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX1250-SDAG-NEXT: s_mov_b32 s2, -1
; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB111_5
@@ -12079,9 +11851,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX1250-SDAG-NEXT: s_cbranch_vccnz .LBB111_7
; GFX1250-SDAG-NEXT: ; %bb.6: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2
@@ -12112,9 +11882,8 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s1, s3
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_6
; GFX1250-GISEL-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_3
@@ -12130,9 +11899,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB111_5
; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2
@@ -12261,9 +12028,8 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -12279,10 +12045,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: .LBB112_2:
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-SDAG-NEXT: .LBB112_3: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -12301,10 +12066,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50
; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0
-; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4
+; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB112_2
@@ -12320,10 +12084,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB112_4
; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -12413,9 +12176,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -12434,11 +12196,10 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB113_2
; GFX1250-SDAG-NEXT: .LBB113_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
-; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -12454,9 +12215,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50
; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0
-; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4
+; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB113_2
@@ -12473,10 +12233,9 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB113_4
; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -12562,9 +12321,8 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -12580,10 +12338,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: .LBB114_2:
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-SDAG-NEXT: .LBB114_3: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -12602,10 +12359,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50
; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0
-; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4
+; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB114_2
@@ -12621,10 +12377,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB114_4
; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -12714,9 +12469,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2
+; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -12735,11 +12489,10 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB115_2
; GFX1250-SDAG-NEXT: .LBB115_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2
-; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -12755,9 +12508,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50
; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0
-; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4
+; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1
; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB115_2
@@ -12774,10 +12526,9 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) {
; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB115_4
; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
index 2079543..b5b2655 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
@@ -2179,6 +2179,7 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre
; GFX1250-SDAG-NEXT: flat_load_b32 v1, v0, s[4:5] scope:SCOPE_SYS
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s0, 0x400
+; GFX1250-SDAG-NEXT: ; kill: killed $sgpr4_sgpr5
; GFX1250-SDAG-NEXT: s_cbranch_scc0 .LBB117_1
; GFX1250-SDAG-NEXT: ; %bb.2: ; %bb2
; GFX1250-SDAG-NEXT: s_endpgm
@@ -2190,15 +2191,16 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
; GFX1250-GISEL-NEXT: .LBB117_1: ; %bb3
; GFX1250-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 4, v[2:3]
+; GFX1250-GISEL-NEXT: ; kill: killed $vgpr4 killed $vgpr5
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: flat_load_b32 v4, v[4:5] scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0x400, v2
; GFX1250-GISEL-NEXT: s_cbranch_vccz .LBB117_1
diff --git a/llvm/test/CodeGen/AMDGPU/fmax3.ll b/llvm/test/CodeGen/AMDGPU/fmax3.ll
index 4827f75..5e6de6d 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax3.ll
@@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmax3_olt_0_f32(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmax3_olt_0_f32:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
-; GFX1250-NEXT: s_mov_b32 s22, s10
-; GFX1250-NEXT: s_mov_b32 s23, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
+; GFX1250-NEXT: s_mov_b32 s22, s2
+; GFX1250-NEXT: s_mov_b32 s23, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: s_mov_b32 s20, s6
-; GFX1250-NEXT: s_mov_b32 s21, s7
-; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: s_mov_b32 s20, s14
+; GFX1250-NEXT: s_mov_b32 s21, s15
+; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_max3_num_f32 v0, v0, v1, v2
-; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
@@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmax3_olt_1_f32(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmax3_olt_1_f32:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
-; GFX1250-NEXT: s_mov_b32 s22, s10
-; GFX1250-NEXT: s_mov_b32 s23, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
+; GFX1250-NEXT: s_mov_b32 s22, s2
+; GFX1250-NEXT: s_mov_b32 s23, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: s_mov_b32 s20, s6
-; GFX1250-NEXT: s_mov_b32 s21, s7
-; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: s_mov_b32 s20, s14
+; GFX1250-NEXT: s_mov_b32 s21, s15
+; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_max3_num_f32 v0, v2, v0, v1
-; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
@@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmax3_olt_0_f16(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-TRUE16-LABEL: test_fmax3_olt_0_f16:
; GFX1250-TRUE16: ; %bb.0:
-; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9
; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l
-; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-TRUE16-NEXT: s_endpgm
;
; GFX1250-FAKE16-LABEL: test_fmax3_olt_0_f16:
; GFX1250-FAKE16: ; %bb.0:
-; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9
; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v0, v1, v2
-; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-FAKE16-NEXT: s_endpgm
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
@@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmax3_olt_1_f16(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-TRUE16-LABEL: test_fmax3_olt_1_f16:
; GFX1250-TRUE16: ; %bb.0:
-; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9
; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l
-; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-TRUE16-NEXT: s_endpgm
;
; GFX1250-FAKE16-LABEL: test_fmax3_olt_1_f16:
; GFX1250-FAKE16: ; %bb.0:
-; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9
; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v2, v0, v1
-; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-FAKE16-NEXT: s_endpgm
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
diff --git a/llvm/test/CodeGen/AMDGPU/fmin3.ll b/llvm/test/CodeGen/AMDGPU/fmin3.ll
index 6dfefd8..6a6f232 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin3.ll
@@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmin3_olt_0_f32(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmin3_olt_0_f32:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
-; GFX1250-NEXT: s_mov_b32 s22, s10
-; GFX1250-NEXT: s_mov_b32 s23, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
+; GFX1250-NEXT: s_mov_b32 s22, s2
+; GFX1250-NEXT: s_mov_b32 s23, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: s_mov_b32 s20, s6
-; GFX1250-NEXT: s_mov_b32 s21, s7
-; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: s_mov_b32 s20, s14
+; GFX1250-NEXT: s_mov_b32 s21, s15
+; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_min3_num_f32 v0, v0, v1, v2
-; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
@@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmin3_olt_1_f32(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmin3_olt_1_f32:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
-; GFX1250-NEXT: s_mov_b32 s22, s10
-; GFX1250-NEXT: s_mov_b32 s23, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
+; GFX1250-NEXT: s_mov_b32 s22, s2
+; GFX1250-NEXT: s_mov_b32 s23, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: s_mov_b32 s20, s6
-; GFX1250-NEXT: s_mov_b32 s21, s7
-; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: s_mov_b32 s20, s14
+; GFX1250-NEXT: s_mov_b32 s21, s15
+; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_min3_num_f32 v0, v2, v0, v1
-; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
@@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmin3_olt_0_f16(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-TRUE16-LABEL: test_fmin3_olt_0_f16:
; GFX1250-TRUE16: ; %bb.0:
-; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9
; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l
-; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-TRUE16-NEXT: s_endpgm
;
; GFX1250-FAKE16-LABEL: test_fmin3_olt_0_f16:
; GFX1250-FAKE16: ; %bb.0:
-; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9
; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v0, v1, v2
-; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-FAKE16-NEXT: s_endpgm
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
@@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmin3_olt_1_f16(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-TRUE16-LABEL: test_fmin3_olt_1_f16:
; GFX1250-TRUE16: ; %bb.0:
-; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9
; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l
-; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-TRUE16-NEXT: s_endpgm
;
; GFX1250-FAKE16-LABEL: test_fmin3_olt_1_f16:
; GFX1250-FAKE16: ; %bb.0:
-; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11
-; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10
-; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11
+; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3
+; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2
+; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2
-; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3
-; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4
-; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5
-; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6
-; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7
-; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10
+; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11
+; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12
+; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13
+; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14
+; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15
+; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS
; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0
-; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1
+; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8
+; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9
; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v2, v0, v1
-; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1250-FAKE16-NEXT: s_endpgm
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
@@ -1217,36 +1217,36 @@ define amdgpu_kernel void @test_fmin3_olt_0_f64(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmin3_olt_0_f64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_wait_xcnt 0x1
-; GFX1250-NEXT: s_mov_b32 s12, s6
-; GFX1250-NEXT: s_mov_b32 s13, s7
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s14
+; GFX1250-NEXT: s_mov_b32 s5, s15
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile double, ptr addrspace(1) %aptr, align 4
%b = load volatile double, ptr addrspace(1) %bptr, align 4
@@ -1427,36 +1427,36 @@ define amdgpu_kernel void @test_fmin3_olt_1_f64(ptr addrspace(1) %out, ptr addrs
;
; GFX1250-LABEL: test_fmin3_olt_1_f64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s18, s10
-; GFX1250-NEXT: s_mov_b32 s19, s11
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s7, s3
+; GFX1250-NEXT: s_mov_b32 s18, s2
+; GFX1250-NEXT: s_mov_b32 s19, s3
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s12, s2
-; GFX1250-NEXT: s_mov_b32 s13, s3
-; GFX1250-NEXT: s_mov_b32 s16, s4
-; GFX1250-NEXT: s_mov_b32 s17, s5
-; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s10
+; GFX1250-NEXT: s_mov_b32 s5, s11
+; GFX1250-NEXT: s_mov_b32 s16, s12
+; GFX1250-NEXT: s_mov_b32 s17, s13
+; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_wait_xcnt 0x1
-; GFX1250-NEXT: s_mov_b32 s12, s6
-; GFX1250-NEXT: s_mov_b32 s13, s7
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS
+; GFX1250-NEXT: s_mov_b32 s4, s14
+; GFX1250-NEXT: s_mov_b32 s5, s15
+; GFX1250-NEXT: s_mov_b32 s0, s8
+; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: s_mov_b32 s1, s9
; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5]
; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[2:3], v[0:1]
-; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1250-NEXT: s_endpgm
%a = load volatile double, ptr addrspace(1) %aptr, align 4
%b = load volatile double, ptr addrspace(1) %bptr, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index e532dea..f807169 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -11,22 +11,20 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
-; GCN-SDAG-NEXT: global_load_u8 v2, v[2:3], off
-; GCN-SDAG-NEXT: global_load_u8 v3, v[4:5], off
-; GCN-SDAG-NEXT: global_load_u8 v0, v[0:1], off
+; GCN-SDAG-NEXT: global_load_u8 v6, v[2:3], off
+; GCN-SDAG-NEXT: global_load_u8 v7, v[4:5], off
+; GCN-SDAG-NEXT: global_load_u8 v10, v[0:1], off
; GCN-SDAG-NEXT: s_wait_loadcnt 0x2
; GCN-SDAG-NEXT: s_wait_xcnt 0x0
-; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v2
+; GCN-SDAG-NEXT: v_lshlrev_b16 v0, 8, v6
; GCN-SDAG-NEXT: s_wait_loadcnt 0x1
-; GCN-SDAG-NEXT: v_lshlrev_b16 v2, 8, v3
+; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v7
+; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GCN-SDAG-NEXT: v_or_b32_e32 v1, v7, v1
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
-; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
-; GCN-SDAG-NEXT: v_or_b32_e32 v1, v3, v2
-; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GCN-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GCN-SDAG-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_bitop2_b32 v0, v10, v0 bitop3:0x54
+; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GCN-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; GCN-SDAG-NEXT: global_store_b32 v[8:9], v0, off
; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
@@ -35,13 +33,15 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
-; GCN-GISEL-NEXT: global_load_u8 v0, v[0:1], off
-; GCN-GISEL-NEXT: global_load_u8 v1, v[2:3], off
-; GCN-GISEL-NEXT: global_load_u8 v2, v[4:5], off
+; GCN-GISEL-NEXT: global_load_u8 v6, v[0:1], off
+; GCN-GISEL-NEXT: global_load_u8 v7, v[2:3], off
+; GCN-GISEL-NEXT: global_load_u8 v10, v[4:5], off
; GCN-GISEL-NEXT: s_wait_loadcnt 0x1
-; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v1, 8, v0
+; GCN-GISEL-NEXT: s_wait_xcnt 0x2
+; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v7, 8, v6
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v2 :: v_dual_lshlrev_b32 v2, 24, v2
+; GCN-GISEL-NEXT: s_wait_xcnt 0x1
+; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v10 :: v_dual_lshlrev_b32 v2, 24, v10
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-GISEL-NEXT: v_or3_b32 v0, v0, v1, v2
; GCN-GISEL-NEXT: global_store_b32 v[8:9], v0, off
@@ -64,21 +64,21 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: global_load_b128 v[4:7], v[0:1], off
-; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[2:3], off
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 0
-; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
-; GCN-SDAG-NEXT: v_pk_add_u16 v10, v6, v2
-; GCN-SDAG-NEXT: v_pk_add_u16 v11, v7, v3
+; GCN-SDAG-NEXT: global_load_b128 v[8:11], v[2:3], off
; GCN-SDAG-NEXT: s_wait_xcnt 0x0
; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 12
+; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
+; GCN-SDAG-NEXT: v_pk_add_u16 v1, v6, v10
+; GCN-SDAG-NEXT: v_pk_add_u16 v12, v7, v11
; GCN-SDAG-NEXT: v_mov_b64_e32 v[6:7], 8
-; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v0
-; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v1
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 0
+; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v9
+; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v8
; GCN-SDAG-NEXT: s_clause 0x2
-; GCN-SDAG-NEXT: global_store_b16 v[2:3], v11, off
-; GCN-SDAG-NEXT: global_store_b32 v[6:7], v10, off
-; GCN-SDAG-NEXT: global_store_b64 v[8:9], v[4:5], off
+; GCN-SDAG-NEXT: global_store_b16 v[2:3], v12, off
+; GCN-SDAG-NEXT: global_store_b32 v[6:7], v1, off
+; GCN-SDAG-NEXT: global_store_b64 v[10:11], v[4:5], off
; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
;
; GCN-GISEL-LABEL: test_v7i16_load_store:
@@ -86,28 +86,29 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: global_load_b128 v[4:7], v[0:1], off
-; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[2:3], off
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 8
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 10
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12
+; GCN-GISEL-NEXT: global_load_b128 v[8:11], v[2:3], off
+; GCN-GISEL-NEXT: s_wait_xcnt 0x0
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 2
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 4
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 6
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 8
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 10
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[22:23], 12
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: v_pk_add_u16 v2, v6, v2
-; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v0
-; GCN-GISEL-NEXT: v_pk_add_u16 v1, v5, v1
-; GCN-GISEL-NEXT: v_pk_add_u16 v3, v7, v3
+; GCN-GISEL-NEXT: v_pk_add_u16 v1, v6, v10
+; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v8
+; GCN-GISEL-NEXT: v_pk_add_u16 v5, v5, v9
+; GCN-GISEL-NEXT: v_pk_add_u16 v6, v7, v11
; GCN-GISEL-NEXT: s_clause 0x6
-; GCN-GISEL-NEXT: global_store_b16 v[8:9], v4, off
-; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[10:11], v4, off
-; GCN-GISEL-NEXT: global_store_b16 v[12:13], v1, off
-; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[14:15], v1, off
-; GCN-GISEL-NEXT: global_store_b16 v[16:17], v2, off
-; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[18:19], v2, off
-; GCN-GISEL-NEXT: global_store_b16 v[20:21], v3, off
-; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GCN-GISEL-NEXT: global_store_b16 v[2:3], v4, off
+; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[12:13], v4, off
+; GCN-GISEL-NEXT: global_store_b16 v[14:15], v5, off
+; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[16:17], v5, off
+; GCN-GISEL-NEXT: global_store_b16 v[18:19], v1, off
+; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[20:21], v1, off
+; GCN-GISEL-NEXT: global_store_b16 v[22:23], v6, off
+; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v1
; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31]
%vec1 = load <7 x i16>, ptr addrspace(1) %ptr1
%insert = insertelement <7 x i16> %vec1, i16 20, i32 4
@@ -253,8 +254,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: global_load_b128 v[22:25], v[0:1], off offset:32
; GCN-SDAG-NEXT: global_load_b128 v[26:29], v[0:1], off offset:16
; GCN-SDAG-NEXT: global_load_b128 v[30:33], v[0:1], off
-; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[36:37], 0x70
+; GCN-SDAG-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x70
; GCN-SDAG-NEXT: v_mov_b64_e32 v[48:49], 48
; GCN-SDAG-NEXT: v_mov_b64_e32 v[38:39], 0x60
; GCN-SDAG-NEXT: v_mov_b64_e32 v[50:51], 32
@@ -262,14 +263,15 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: v_mov_b64_e32 v[66:67], 0
; GCN-SDAG-NEXT: v_mov_b64_e32 v[52:53], 0x50
; GCN-SDAG-NEXT: v_mov_b64_e32 v[54:55], 64
-; GCN-SDAG-NEXT: v_dual_mov_b32 v34, 0xc8 :: v_dual_mov_b32 v35, 0
+; GCN-SDAG-NEXT: s_wait_xcnt 0x0
+; GCN-SDAG-NEXT: v_dual_mov_b32 v0, 0xc8 :: v_dual_mov_b32 v1, 0
; GCN-SDAG-NEXT: s_wait_loadcnt 0x7
-; GCN-SDAG-NEXT: global_store_b128 v[36:37], v[6:9], off
+; GCN-SDAG-NEXT: global_store_b128 v[2:3], v[6:9], off
; GCN-SDAG-NEXT: s_wait_loadcnt 0x6
; GCN-SDAG-NEXT: global_store_b128 v[38:39], v[10:13], off
; GCN-SDAG-NEXT: s_wait_loadcnt 0x5
; GCN-SDAG-NEXT: s_wait_xcnt 0x1
-; GCN-SDAG-NEXT: v_dual_mov_b32 v36, v16 :: v_dual_mov_b32 v37, v17
+; GCN-SDAG-NEXT: v_dual_mov_b32 v2, v16 :: v_dual_mov_b32 v3, v17
; GCN-SDAG-NEXT: s_wait_xcnt 0x0
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11]
@@ -286,8 +288,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[6:7], v[6:7], v[6:7]
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
-; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3]
-; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1]
+; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37]
+; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[16:17], v[16:17], v[16:17]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[14:15], 0xc8, v[14:15]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[24:25], 0x64, v[24:25]
@@ -298,8 +300,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[20:21], v[20:21], v[20:21]
; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[18:19], v[18:19], v[18:19]
; GCN-SDAG-NEXT: s_clause 0x1
-; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[34:37], off
-; GCN-SDAG-NEXT: global_store_b128 v[54:55], v[0:3], off
+; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[0:3], off
+; GCN-SDAG-NEXT: global_store_b128 v[54:55], v[34:37], off
; GCN-SDAG-NEXT: s_clause 0x7
; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:96
; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off offset:112
@@ -309,7 +311,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48
; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[30:33], off
; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[26:29], off offset:16
-; GCN-SDAG-NEXT: s_wait_xcnt 0x8
+; GCN-SDAG-NEXT: s_wait_xcnt 0x9
; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33
; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
;
@@ -325,7 +327,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: global_load_b128 v[22:25], v[0:1], off offset:48
; GCN-GISEL-NEXT: global_load_b128 v[26:29], v[0:1], off offset:96
; GCN-GISEL-NEXT: global_load_b128 v[30:33], v[0:1], off offset:112
-; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64
+; GCN-GISEL-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64
; GCN-GISEL-NEXT: v_mov_b64_e32 v[38:39], 0
; GCN-GISEL-NEXT: v_mov_b64_e32 v[48:49], 16
; GCN-GISEL-NEXT: v_mov_b64_e32 v[50:51], 32
@@ -333,7 +335,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: v_mov_b64_e32 v[66:67], 0x60
; GCN-GISEL-NEXT: v_mov_b64_e32 v[68:69], 0x70
; GCN-GISEL-NEXT: v_mov_b64_e32 v[54:55], 64
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[34:35], 0xc8
+; GCN-GISEL-NEXT: s_wait_xcnt 0x0
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], 0xc8
; GCN-GISEL-NEXT: v_mov_b64_e32 v[64:65], 0x50
; GCN-GISEL-NEXT: s_wait_loadcnt 0x6
; GCN-GISEL-NEXT: global_store_b128 v[38:39], v[10:13], off
@@ -349,7 +352,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: global_store_b128 v[68:69], v[30:33], off
; GCN-GISEL-NEXT: s_wait_xcnt 0x5
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13]
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[36:37], v[8:9]
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], v[8:9]
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11]
; GCN-GISEL-NEXT: s_wait_xcnt 0x4
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[14:15], v[14:15], v[14:15]
@@ -361,8 +364,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[22:23], v[22:23], v[22:23]
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[24:25], v[24:25], v[24:25]
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1]
-; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3]
+; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35]
+; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37]
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[6:7], 0xc8, v[6:7]
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9]
; GCN-GISEL-NEXT: s_wait_xcnt 0x1
@@ -372,8 +375,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[30:31], v[30:31], v[30:31]
; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[32:33], v[32:33], v[32:33]
; GCN-GISEL-NEXT: s_clause 0x1
-; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[0:3], off
-; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[34:37], off
+; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[34:37], off
+; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[0:3], off
; GCN-GISEL-NEXT: s_clause 0x7
; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off
; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[14:17], off offset:16
@@ -383,7 +386,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off offset:80
; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[26:29], off offset:96
; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[30:33], off offset:112
-; GCN-GISEL-NEXT: s_wait_xcnt 0x9
+; GCN-GISEL-NEXT: s_wait_xcnt 0x8
; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v12 :: v_dual_mov_b32 v1, v13
; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31]
%a = load <16 x i64>, ptr addrspace(1) %ptr_a, align 4
@@ -402,16 +405,17 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1,
; GCN-SDAG-LABEL: test_v7i16_load_store_kernel:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GCN-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GCN-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GCN-SDAG-NEXT: s_wait_xcnt 0x0
; GCN-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12
; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 8
; GCN-SDAG-NEXT: v_mov_b64_e32 v[12:13], 0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_clause 0x1
-; GCN-SDAG-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset
-; GCN-SDAG-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset
+; GCN-SDAG-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset
+; GCN-SDAG-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset
+; GCN-SDAG-NEXT: s_wait_xcnt 0x0
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
; GCN-SDAG-NEXT: v_pk_add_u16 v3, v3, v7
; GCN-SDAG-NEXT: v_pk_add_u16 v2, v2, v6
@@ -428,10 +432,9 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1,
; GCN-GISEL-LABEL: test_v7i16_load_store_kernel:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GCN-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GCN-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GCN-GISEL-NEXT: s_wait_xcnt 0x0
; GCN-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0
; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2
; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4
; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6
@@ -440,8 +443,10 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1,
; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_clause 0x1
-; GCN-GISEL-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset
-; GCN-GISEL-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset
+; GCN-GISEL-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset
+; GCN-GISEL-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset
+; GCN-GISEL-NEXT: s_wait_xcnt 0x0
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
; GCN-GISEL-NEXT: v_pk_add_u16 v0, v0, v4
; GCN-GISEL-NEXT: v_pk_add_u16 v1, v1, v5
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
index 92836d8..63db24a 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
@@ -486,7 +486,7 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -516,7 +516,7 @@ body: |
S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
- INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
+ INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
S_ENDPGM 0
...
@@ -1368,7 +1368,7 @@ body: |
; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -1408,7 +1408,7 @@ body: |
undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -1726,7 +1726,7 @@ body: |
; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -1763,7 +1763,7 @@ body: |
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
%0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
%4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
index 9cbdc38..5b3e486 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
@@ -8,16 +8,16 @@
define amdgpu_kernel void @s_input_output_i128() {
; GFX908-LABEL: name: s_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: s_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=s"()
call void asm sideeffect "; use $0", "s"(i128 %val)
@@ -27,16 +27,16 @@ define amdgpu_kernel void @s_input_output_i128() {
define amdgpu_kernel void @v_input_output_i128() {
; GFX908-LABEL: name: v_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7798793 /* reguse:VReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: v_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7995401 /* reguse:VReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8257545 /* reguse:VReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=v"()
call void asm sideeffect "; use $0", "v"(i128 %val)
@@ -47,16 +47,16 @@ define amdgpu_kernel void @a_input_output_i128() {
; GFX908-LABEL: name: a_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8323082 /* regdef:AReg_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8847370 /* regdef:AReg_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: a_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8650762 /* regdef:AReg_128_Align2 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9568266 /* regdef:AReg_128_Align2 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = call i128 asm sideeffect "; def $0", "=a"()
call void asm sideeffect "; use $0", "a"(i128 %val)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll
index 90fcb51..fa97380 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll
@@ -11,14 +11,11 @@ declare i32 @llvm.amdgcn.cluster.id.z() #0
define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) {
; CHECK-UNKNOWN-LABEL: test_cluster_id_x:
; CHECK-UNKNOWN: ; %bb.0:
-; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0
; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3]
; CHECK-UNKNOWN-NEXT: s_endpgm
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
;
; CHECK-MESA3D-LABEL: test_cluster_id_x:
; CHECK-MESA3D: .amd_kernel_code_t
@@ -68,7 +65,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -98,14 +95,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) {
;
; CHECK-G-UNKNOWN-LABEL: test_cluster_id_x:
; CHECK-G-UNKNOWN: ; %bb.0:
-; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0
; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3]
; CHECK-G-UNKNOWN-NEXT: s_endpgm
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
;
; CHECK-G-MESA3D-LABEL: test_cluster_id_x:
; CHECK-G-MESA3D: .amd_kernel_code_t
@@ -155,7 +149,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -190,14 +184,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) {
define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 {
; CHECK-UNKNOWN-LABEL: test_cluster_id_y:
; CHECK-UNKNOWN: ; %bb.0:
-; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0
; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3]
; CHECK-UNKNOWN-NEXT: s_endpgm
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
;
; CHECK-MESA3D-LABEL: test_cluster_id_y:
; CHECK-MESA3D: .amd_kernel_code_t
@@ -247,7 +238,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -277,14 +268,11 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 {
;
; CHECK-G-UNKNOWN-LABEL: test_cluster_id_y:
; CHECK-G-UNKNOWN: ; %bb.0:
-; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0
; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3]
; CHECK-G-UNKNOWN-NEXT: s_endpgm
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
;
; CHECK-G-MESA3D-LABEL: test_cluster_id_y:
; CHECK-G-MESA3D: .amd_kernel_code_t
@@ -334,7 +322,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -369,16 +357,14 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 {
define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 {
; CHECK-UNKNOWN-LABEL: test_cluster_id_z:
; CHECK-UNKNOWN: ; %bb.0:
-; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
-; CHECK-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16
+; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
+; CHECK-UNKNOWN-NEXT: s_wait_xcnt 0x0
+; CHECK-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16
; CHECK-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[2:3]
; CHECK-UNKNOWN-NEXT: s_endpgm
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
-; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1
;
; CHECK-MESA3D-LABEL: test_cluster_id_z:
; CHECK-MESA3D: .amd_kernel_code_t
@@ -428,7 +414,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -460,16 +446,14 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 {
;
; CHECK-G-UNKNOWN-LABEL: test_cluster_id_z:
; CHECK-G-UNKNOWN: ; %bb.0:
-; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
-; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16
+; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
+; CHECK-G-UNKNOWN-NEXT: s_wait_xcnt 0x0
+; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16
; CHECK-G-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
+; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s0
; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0
-; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3]
; CHECK-G-UNKNOWN-NEXT: s_endpgm
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
-; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1
;
; CHECK-G-MESA3D-LABEL: test_cluster_id_z:
; CHECK-G-MESA3D: .amd_kernel_code_t
@@ -519,7 +503,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll
index aa3b7b3..3ef84a3 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll
@@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out)
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -330,7 +330,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out)
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out)
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out)
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -956,7 +956,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -1044,7 +1044,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -1135,7 +1135,7 @@ define amdgpu_kernel void @test_workgroup_id_z_optimized(ptr addrspace(1) %out)
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -1219,7 +1219,7 @@ define amdgpu_kernel void @test_workgroup_id_z_optimized(ptr addrspace(1) %out)
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll
index afe37e3..b8ff9e5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll
@@ -65,7 +65,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -153,7 +153,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll
index 7ea4fa5..9bca696 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll
@@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x_optimized(ptr addrspace(1) %o
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -330,7 +330,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x_optimized(ptr addrspace(1) %o
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 {
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 {
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -954,7 +954,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o
; CHECK-MESA3D-NEXT: is_ptr64 = 1
; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0
@@ -1038,7 +1038,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o
; CHECK-G-MESA3D-NEXT: is_ptr64 = 1
; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0
; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0
-; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0
+; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1
; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0
; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
index 56215ca..67d0410 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
@@ -59,21 +59,20 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) {
; GFX9-NEXT: global_store_dword v[0:1], v0, off
; GFX9-NEXT: s_endpgm
;
-; GFX1250-SDAG-LABEL: is_private_vgpr:
-; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v1
-; GFX1250-SDAG-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v0, off
-; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-LABEL: is_private_vgpr:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT: global_store_b32 v[0:1], v0, off
+; GFX1250-NEXT: s_endpgm
;
; CI-GISEL-LABEL: is_private_vgpr:
; CI-GISEL: ; %bb.0:
@@ -122,22 +121,6 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) {
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-NEXT: global_store_b32 v[0:1], v0, off
; GFX11-NEXT: s_endpgm
-;
-; GFX1250-GISEL-LABEL: is_private_vgpr:
-; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v1, v2
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v0, off
-; GFX1250-GISEL-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds ptr, ptr addrspace(1) %ptr.ptr, i32 %id
%ptr = load volatile ptr, ptr addrspace(1) %gep
@@ -206,9 +189,8 @@ define amdgpu_kernel void @is_private_sgpr(ptr %ptr) {
; GFX1250-SDAG-LABEL: is_private_sgpr:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x4
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, s1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s0, 0x4000000
; GFX1250-SDAG-NEXT: s_cselect_b32 s0, -1, 0
@@ -285,9 +267,8 @@ define amdgpu_kernel void @is_private_sgpr(ptr %ptr) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, s0
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s0, 0x4000000
; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB1_2
; GFX1250-GISEL-NEXT: ; %bb.1: ; %bb0
@@ -311,5 +292,4 @@ bb1:
; CI: {{.*}}
; GFX10-GISEL: {{.*}}
; GFX11-GISEL: {{.*}}
-; GFX1250: {{.*}}
; SI-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll
index 4f7bbf8..42a50bb 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll
@@ -5,13 +5,13 @@
define amdgpu_kernel void @v_permlane_bcast_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) {
; GFX1250-LABEL: v_permlane_bcast_b32_vss:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s4
+; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s6
; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1250-NEXT: s_endpgm
%v = call i32 @llvm.amdgcn.permlane.bcast(i32 %src0, i32 %src1, i32 %src2)
@@ -92,13 +92,13 @@ define amdgpu_kernel void @v_permlane_bcast_b32_vvv(ptr addrspace(1) %out, i32 %
define amdgpu_kernel void @v_permlane_down_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) {
; GFX1250-LABEL: v_permlane_down_b32_vss:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s4
+; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s6
; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1250-NEXT: s_endpgm
%v = call i32 @llvm.amdgcn.permlane.down(i32 %src0, i32 %src1, i32 %src2)
@@ -179,13 +179,13 @@ define amdgpu_kernel void @v_permlane_down_b32_vvv(ptr addrspace(1) %out, i32 %s
define amdgpu_kernel void @v_permlane_up_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) {
; GFX1250-LABEL: v_permlane_up_b32_vss:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s4
+; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s6
; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1250-NEXT: s_endpgm
%v = call i32 @llvm.amdgcn.permlane.up(i32 %src0, i32 %src1, i32 %src2)
@@ -266,13 +266,13 @@ define amdgpu_kernel void @v_permlane_up_b32_vvv(ptr addrspace(1) %out, i32 %src
define amdgpu_kernel void @v_permlane_xor_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) {
; GFX1250-LABEL: v_permlane_xor_b32_vss:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s4
+; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s6
; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1250-NEXT: s_endpgm
%v = call i32 @llvm.amdgcn.permlane.xor(i32 %src0, i32 %src1, i32 %src2)
diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir
index 76e2092..abcae69 100644
--- a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir
+++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir
@@ -69,9 +69,9 @@ body: |
bb.0:
; GCN-LABEL: name: merge_s_load_x1_x1_imm_no_scale_offset
; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF
- ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4)
- ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[S_LOAD_DWORDX2_IMM]].sub0
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_LOAD_DWORDX2_IMM]].sub1
+ ; GCN-NEXT: early-clobber %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4)
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY %3.sub0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed %3.sub1
%0:sgpr_64 = IMPLICIT_DEF
%1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 0 :: (dereferenceable invariant load (s32))
%2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 0 :: (dereferenceable invariant load (s32))
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index 1e6b77e..4ad161c 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -471,13 +471,13 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX1250-NEXT: s_cmp_eq_u32 s0, 0
; GFX1250-NEXT: s_cbranch_scc1 .LBB4_3
; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader
-; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3]
; GFX1250-NEXT: .LBB4_2: ; %for.body
@@ -602,13 +602,13 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX1250-NEXT: s_cmp_eq_u32 s0, 0
; GFX1250-NEXT: s_cbranch_scc1 .LBB5_3
; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader
-; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3]
; GFX1250-NEXT: .LBB5_2: ; %for.body
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index dbcd370..08ec0c8 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -1117,18 +1117,19 @@ define amdgpu_kernel void @mad_i64_i32_uniform(ptr addrspace(1) %out, i32 %arg0,
;
; GFX1250-LABEL: mad_i64_i32_uniform:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1250-NEXT: s_mov_b32 s7, 0
+; GFX1250-NEXT: s_mov_b32 s5, 0
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s6, s2
+; GFX1250-NEXT: s_mov_b32 s4, s2
; GFX1250-NEXT: s_mov_b32 s2, s3
-; GFX1250-NEXT: s_mov_b32 s3, s7
+; GFX1250-NEXT: s_mov_b32 s3, s5
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_mul_u64 s[2:3], s[6:7], s[2:3]
-; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX1250-NEXT: s_mul_u64 s[2:3], s[4:5], s[2:3]
+; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
index 61f2629..c19d5a6 100644
--- a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
+++ b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
@@ -33,7 +33,7 @@ name: asm_write_vgpr_accvgpr_write_read
body: |
bb.0:
- INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0
+ INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0
$agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
...
@@ -47,7 +47,7 @@ name: asm_write_vgpr_accvgpr_write_read_partialnop
body: |
bb.0:
- INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0
+ INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0
S_NOP 0
$agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
...
@@ -60,7 +60,7 @@ name: asm_write_vgpr_accvgpr_write_read_otherreg
body: |
bb.0:
liveins: $vgpr0
- INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr1
+ INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr1
$agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
...
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll
index fef9a9a..ae08054 100644
--- a/llvm/test/CodeGen/AMDGPU/max.ll
+++ b/llvm/test/CodeGen/AMDGPU/max.ll
@@ -257,16 +257,15 @@ define amdgpu_kernel void @v_test_imax_sge_i8(ptr addrspace(1) %out, ptr addrspa
;
; GFX1250-LABEL: v_test_imax_sge_i8:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_i8 s2, s[2:3], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_i8 s3, s[4:5], 0x0
+; GFX1250-NEXT: s_load_i8 s4, s[2:3], 0x0
+; GFX1250-NEXT: s_load_i8 s5, s[6:7], 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_max_i32 s2, s2, s3
+; GFX1250-NEXT: s_max_i32 s2, s4, s5
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b32_e32 v1, s2
; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
@@ -701,16 +700,15 @@ define amdgpu_kernel void @v_test_umax_uge_i8(ptr addrspace(1) %out, ptr addrspa
;
; GFX1250-LABEL: v_test_umax_uge_i8:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_u8 s2, s[2:3], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_u8 s3, s[4:5], 0x0
+; GFX1250-NEXT: s_load_u8 s4, s[2:3], 0x0
+; GFX1250-NEXT: s_load_u8 s5, s[6:7], 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_max_u32 s2, s2, s3
+; GFX1250-NEXT: s_max_u32 s2, s4, s5
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b32_e32 v1, s2
; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
@@ -777,13 +775,12 @@ define amdgpu_kernel void @v_test_umax_ugt_i32(ptr addrspace(1) %out, ptr addrsp
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
-; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b32 s6, s[0:1], 0x0
+; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x24
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_max_u32_e32 v0, s2, v0
-; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: v_max_u32_e32 v0, s6, v0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[2:3]
; GFX1250-NEXT: s_endpgm
;
; EG-LABEL: v_test_umax_ugt_i32:
@@ -1122,12 +1119,12 @@ define amdgpu_kernel void @test_umax_ugt_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_umax_ugt_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
;
@@ -1175,12 +1172,12 @@ define amdgpu_kernel void @test_umax_uge_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_umax_uge_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
;
@@ -1228,12 +1225,12 @@ define amdgpu_kernel void @test_imax_sgt_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_imax_sgt_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
;
@@ -1281,12 +1278,12 @@ define amdgpu_kernel void @test_imax_sge_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_imax_sge_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index 311527d..6a3d31f 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -131,14 +131,14 @@ define amdgpu_kernel void @v_test_imin_sle_i32(ptr addrspace(1) %out, ptr addrsp
;
; GFX1250-LABEL: v_test_imin_sle_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
@@ -1172,14 +1172,14 @@ define amdgpu_kernel void @s_test_imin_sle_v4i16(ptr addrspace(1) %out, <4 x i16
;
; GFX1250-LABEL: s_test_imin_sle_v4i16:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_pk_min_i16 v1, s1, s3
; GFX1250-NEXT: v_pk_min_i16 v0, s0, s2
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7]
; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <4 x i16> %a, %b
%val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
@@ -1307,14 +1307,14 @@ define amdgpu_kernel void @v_test_imin_slt_i32(ptr addrspace(1) %out, ptr addrsp
;
; GFX1250-LABEL: v_test_imin_slt_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
@@ -1484,14 +1484,14 @@ define amdgpu_kernel void @v_test_imin_slt_i16(ptr addrspace(1) %out, ptr addrsp
;
; GFX1250-LABEL: v_test_imin_slt_i16:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_u16 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_u16 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_i16 v1, v1, v2
; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] scale_offset
@@ -1686,16 +1686,16 @@ define amdgpu_kernel void @s_test_imin_slt_v2i32(ptr addrspace(1) %out, <2 x i32
;
; GFX1250-LABEL: s_test_imin_slt_v2i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_min_i32 s0, s0, s2
; GFX1250-NEXT: s_min_i32 s1, s1, s3
; GFX1250-NEXT: v_mov_b32_e32 v0, s0
; GFX1250-NEXT: v_mov_b32_e32 v1, s1
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7]
; GFX1250-NEXT: s_endpgm
%cmp = icmp slt <2 x i32> %a, %b
%val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
@@ -2011,14 +2011,14 @@ define amdgpu_kernel void @v_test_umin_ule_i32(ptr addrspace(1) %out, ptr addrsp
;
; GFX1250-LABEL: v_test_umin_ule_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
@@ -2171,16 +2171,16 @@ define amdgpu_kernel void @v_test_umin_ule_v3i32(ptr addrspace(1) %out, ptr addr
;
; GFX1250-LABEL: v_test_umin_ule_v3i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 4, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b96 v[0:2], v3, s[2:3]
-; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[4:5]
+; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[6:7]
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_u32_e32 v2, v2, v6
; GFX1250-NEXT: v_min_u32_e32 v1, v1, v5
@@ -2374,14 +2374,14 @@ define amdgpu_kernel void @v_test_umin_ule_v3i16(ptr addrspace(1) %out, ptr addr
;
; GFX1250-LABEL: v_test_umin_ule_v3i16:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 3, v4
; GFX1250-NEXT: s_wait_loadcnt 0x0
@@ -2611,14 +2611,14 @@ define amdgpu_kernel void @v_test_umin_ult_i32(ptr addrspace(1) %out, ptr addrsp
;
; GFX1250-LABEL: v_test_umin_ult_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
@@ -2771,14 +2771,14 @@ define amdgpu_kernel void @v_test_umin_ult_i8(ptr addrspace(1) %out, ptr addrspa
;
; GFX1250-LABEL: v_test_umin_ult_i8:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_u8 v1, v0, s[2:3]
-; GFX1250-NEXT: global_load_u8 v2, v0, s[4:5]
+; GFX1250-NEXT: global_load_u8 v2, v0, s[6:7]
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_min_u16 v1, v1, v2
; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
@@ -3023,23 +3023,22 @@ define amdgpu_kernel void @v_test_umin_ult_i32_multi_use(ptr addrspace(1) %out0,
;
; GFX1250-LABEL: v_test_umin_ult_i32_multi_use:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s5, s[6:7], 0x0
+; GFX1250-NEXT: s_load_b32 s0, s[12:13], 0x0
+; GFX1250-NEXT: s_load_b32 s1, s[14:15], 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_cmp_lt_u32 s4, s5
-; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT: s_cmp_lt_u32 s0, s1
+; GFX1250-NEXT: s_cselect_b32 s2, -1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s6
-; GFX1250-NEXT: s_and_b32 s6, s6, exec_lo
-; GFX1250-NEXT: s_cselect_b32 s4, s4, s5
-; GFX1250-NEXT: v_mov_b32_e32 v2, s4
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX1250-NEXT: s_and_b32 s2, s2, exec_lo
+; GFX1250-NEXT: s_cselect_b32 s0, s0, s1
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b32 v1, v2, s[0:1]
-; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_store_b32 v1, v2, s[8:9]
+; GFX1250-NEXT: global_store_b8 v1, v0, s[10:11]
; GFX1250-NEXT: s_endpgm
%a = load i32, ptr addrspace(1) %aptr, align 4
%b = load i32, ptr addrspace(1) %bptr, align 4
@@ -3220,12 +3219,12 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0,
;
; GFX1250-LABEL: v_test_umin_ult_i16_multi_use:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0
; GFX1250-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_load_u16 v1, v0, s[6:7]
-; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5]
+; GFX1250-NEXT: global_load_u16 v1, v0, s[14:15]
+; GFX1250-NEXT: global_load_u16 v2, v0, s[12:13]
; GFX1250-NEXT: s_wait_loadcnt 0x1
; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff, v1
; GFX1250-NEXT: s_wait_loadcnt 0x0
@@ -3235,8 +3234,8 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0,
; GFX1250-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
; GFX1250-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1]
-; GFX1250-NEXT: global_store_b8 v0, v2, s[2:3]
+; GFX1250-NEXT: global_store_b16 v0, v1, s[8:9]
+; GFX1250-NEXT: global_store_b8 v0, v2, s[10:11]
; GFX1250-NEXT: s_endpgm
%a = load i16, ptr addrspace(1) %aptr, align 2
%b = load i16, ptr addrspace(1) %bptr, align 2
@@ -4338,12 +4337,12 @@ define amdgpu_kernel void @test_umin_ult_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_umin_ult_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
%tmp = icmp ult i64 %a, %b
@@ -4462,12 +4461,12 @@ define amdgpu_kernel void @test_umin_ule_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_umin_ule_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
%tmp = icmp ule i64 %a, %b
@@ -4586,12 +4585,12 @@ define amdgpu_kernel void @test_imin_slt_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_imin_slt_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
%tmp = icmp slt i64 %a, %b
@@ -4710,12 +4709,12 @@ define amdgpu_kernel void @test_imin_sle_i64(ptr addrspace(1) %out, i64 %a, i64
;
; GFX1250-LABEL: test_imin_sle_i64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-NEXT: s_endpgm
%tmp = icmp sle i64 %a, %b
@@ -4872,14 +4871,14 @@ define amdgpu_kernel void @v_test_imin_sle_v2i16(ptr addrspace(1) %out, ptr addr
;
; GFX1250-LABEL: v_test_imin_sle_v2i16:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_pk_min_i16 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
@@ -5042,14 +5041,14 @@ define amdgpu_kernel void @v_test_imin_ule_v2i16(ptr addrspace(1) %out, ptr addr
;
; GFX1250-LABEL: v_test_imin_ule_v2i16:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
-; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_pk_min_u16 v1, v1, v2
; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index baccb4c..d29847e 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -450,6 +450,7 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(ptr addrspace(1) %out, i64 %a,
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x34
+; GFX1250-NEXT: ; kill: killed $sgpr4_sgpr5
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mul_i32 s2, s3, s2
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
@@ -613,25 +614,25 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(ptr addrspace(1) %out, ptr add
;
; GFX1250-LABEL: v_trunc_i64_mul_to_i32:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s6, s10
-; GFX1250-NEXT: s_mov_b32 s7, s11
+; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34
+; GFX1250-NEXT: s_mov_b32 s6, -1
+; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s14, s6
+; GFX1250-NEXT: s_mov_b32 s15, s7
+; GFX1250-NEXT: s_mov_b32 s10, s6
+; GFX1250-NEXT: s_mov_b32 s11, s7
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mov_b32 s12, s2
; GFX1250-NEXT: s_mov_b32 s13, s3
; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null
-; GFX1250-NEXT: buffer_load_b32 v1, off, s[4:7], null
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: buffer_load_b32 v1, off, s[8:11], null
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_mul_lo_u32 v0, v1, v0
-; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b32 v0, off, s[4:7], null
; GFX1250-NEXT: s_endpgm
;
; EG-LABEL: v_trunc_i64_mul_to_i32:
@@ -2091,11 +2092,11 @@ define amdgpu_kernel void @s_mul_i64(ptr addrspace(1) %out, i64 %a, i64 %b) noun
;
; GFX1250-LABEL: s_mul_i64:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[4:5]
+; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7]
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
; GFX1250-NEXT: s_mov_b32 s2, -1
@@ -2292,25 +2293,25 @@ define amdgpu_kernel void @v_mul_i64(ptr addrspace(1) %out, ptr addrspace(1) %ap
;
; GFX1250-LABEL: v_mul_i64:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1250-NEXT: s_mov_b32 s10, -1
-; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s14, s10
-; GFX1250-NEXT: s_mov_b32 s15, s11
-; GFX1250-NEXT: s_mov_b32 s6, s10
-; GFX1250-NEXT: s_mov_b32 s7, s11
+; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34
+; GFX1250-NEXT: s_mov_b32 s6, -1
+; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s14, s6
+; GFX1250-NEXT: s_mov_b32 s15, s7
+; GFX1250-NEXT: s_mov_b32 s10, s6
+; GFX1250-NEXT: s_mov_b32 s11, s7
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mov_b32 s12, s2
; GFX1250-NEXT: s_mov_b32 s13, s3
; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null
-; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[4:7], null
-; GFX1250-NEXT: s_mov_b32 s8, s0
-; GFX1250-NEXT: s_mov_b32 s9, s1
+; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[8:11], null
+; GFX1250-NEXT: s_mov_b32 s4, s0
+; GFX1250-NEXT: s_mov_b32 s5, s1
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null
+; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
; GFX1250-NEXT: s_endpgm
;
; EG-LABEL: v_mul_i64:
@@ -2845,30 +2846,30 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
;
; GFX1250-LABEL: mul64_in_branch:
; GFX1250: ; %bb.0: ; %entry
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX1250-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_3
; GFX1250-NEXT: ; %bb.1: ; %else
-; GFX1250-NEXT: s_mul_u64 s[4:5], s[4:5], s[6:7]
+; GFX1250-NEXT: s_mul_u64 s[0:1], s[12:13], s[14:15]
; GFX1250-NEXT: s_cbranch_execnz .LBB16_4
; GFX1250-NEXT: .LBB16_2: ; %if
-; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s6, -1
-; GFX1250-NEXT: s_mov_b32 s4, s2
-; GFX1250-NEXT: s_mov_b32 s5, s3
-; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null
+; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s0, s10
+; GFX1250-NEXT: s_mov_b32 s1, s11
+; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null
; GFX1250-NEXT: s_branch .LBB16_5
; GFX1250-NEXT: .LBB16_3:
-; GFX1250-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX1250-NEXT: ; implicit-def: $sgpr0_sgpr1
; GFX1250-NEXT: s_branch .LBB16_2
; GFX1250-NEXT: .LBB16_4:
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX1250-NEXT: .LBB16_5: ; %endif
-; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1250-NEXT: s_mov_b32 s2, -1
+; GFX1250-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1250-NEXT: s_mov_b32 s10, -1
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
+; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null
; GFX1250-NEXT: s_endpgm
;
; EG-LABEL: mul64_in_branch:
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index b0651ef..78207c2 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -340,46 +340,46 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fadd_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35]
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1]
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35]
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[40:41]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9]
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[38:39]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[34:35]
@@ -395,58 +395,58 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[36:37]
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[38:39]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35]
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1]
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v32_vs:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41]
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43]
@@ -466,14 +466,14 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -1597,46 +1597,46 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fmul_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35]
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1]
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35]
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[40:41]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9]
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[38:39]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[34:35]
@@ -1652,58 +1652,58 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[36:37]
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[38:39]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35]
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1]
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fmul_v32_vs:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41]
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43]
@@ -1723,14 +1723,14 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -2428,46 +2428,46 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fma_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35]
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1]
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[20:21]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[22:23]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[30:31]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[28:29]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[12:13]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[14:15]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[4:5]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[6:7]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[24:25]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[26:27]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[18:19]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[40:41]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[50:51]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[48:49]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[20:21]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[22:23]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[10:11]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[12:13]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[14:15]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[44:45]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[46:47]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[38:39]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[40:41], v[40:41]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9]
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[38:39], v[38:39]
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[52:53], v[52:53]
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[32:33], v[32:33]
@@ -2482,58 +2482,58 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[36:37], v[36:37]
; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[38:39], v[38:39]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35]
-; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1]
+; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v32_vs:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
-; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33]
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41]
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43]
@@ -2553,14 +2553,14 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37]
; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -3529,9 +3529,9 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
;
; GFX1250-SDAG-LABEL: fadd_fadd_fsub:
; GFX1250-SDAG: ; %bb.0: ; %bb
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3
@@ -3541,14 +3541,14 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5]
+; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[6:7]
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_fadd_fsub:
; GFX1250-GISEL: ; %bb.0: ; %bb
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
@@ -3560,7 +3560,7 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1]
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
-; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[6:7]
; GFX1250-GISEL-NEXT: s_endpgm
bb:
%i12 = fadd <2 x float> %arg, %arg1
diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index 6509d80..f88b1bf 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -12,7 +12,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX908-NEXT: {{ $}}
; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %25
+ ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %25
; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %25
; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def %27
; REGALLOC-GFX908-NEXT: SI_SPILL_AV64_SAVE %27, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
@@ -37,7 +37,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX908-NEXT: $sgpr12 = S_ADD_U32 $sgpr12, $sgpr9, implicit-def $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15
; PEI-GFX908-NEXT: $sgpr13 = S_ADDC_U32 $sgpr13, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15
; PEI-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX908-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def renamable $vgpr0_vgpr1
; PEI-GFX908-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr12_sgpr13_sgpr14_sgpr15, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
@@ -61,7 +61,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX90A-NEXT: {{ $}}
; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %23
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %23
; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %23
; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def %21
; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %21
@@ -80,7 +80,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; PEI-GFX90A-NEXT: {{ $}}
; PEI-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX90A-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def renamable $vgpr2_vgpr3
; PEI-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
index b717f85..6671201 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -186,12 +186,12 @@ define amdgpu_kernel void @mixed_inreg_block_count_x(ptr addrspace(1) %out, i32
;
; GFX1250-LABEL: mixed_inreg_block_count_x:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x10
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b32 s4, s[0:1], 0x10
+; GFX1250-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
+; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3]
; GFX1250-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%load = load i32, ptr addrspace(4) %imp_arg_ptr
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index 4d367ef..c1764c9 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -346,10 +346,10 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) inreg %out, ptr ad
;
; GFX1250-LABEL: byref_preload_arg:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100
+; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
-; GFX1250-NEXT: v_mov_b32_e32 v2, s1
+; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
+; GFX1250-NEXT: v_mov_b32_e32 v2, s5
; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_storecnt 0x0
; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS
@@ -404,10 +404,10 @@ define amdgpu_kernel void @byref_staggered_preload_arg(ptr addrspace(1) inreg %o
;
; GFX1250-LABEL: byref_staggered_preload_arg:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100
+; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
-; GFX1250-NEXT: v_mov_b32_e32 v2, s1
+; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
+; GFX1250-NEXT: v_mov_b32_e32 v2, s5
; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
; GFX1250-NEXT: s_wait_storecnt 0x0
; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
index b32e997..80afe7a 100644
--- a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
+++ b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
@@ -43,17 +43,17 @@ machineFunctionInfo:
body: |
bb.0:
- INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $agpr0
+ INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2424842 /* regdef:AGPR_32 */, implicit-def $agpr0
%14:vgpr_32 = COPY killed $agpr0
- INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 27394058 /* regdef:VReg_512 */, def %7, 13697034 /* regdef:VReg_256 */, def %8, 6225930 /* regdef:VReg_128 */, def %9, 4915210 /* regdef:VReg_96 */, def %10, 4915210 /* regdef:VReg_96 */, def %11
+ INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 40042506 /* regdef:VReg_512 */, def %7, 19464202 /* regdef:VReg_256 */, def %8, 7929866 /* regdef:VReg_128 */, def %9, 5963786 /* regdef:VReg_96 */, def %10, 5963786 /* regdef:VReg_96 */, def %11
INLINEASM &"; clobber", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def dead early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, 12 /* clobber */, implicit-def dead early-clobber $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27394057 /* reguse:VReg_512 */, %7
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 13697033 /* reguse:VReg_256 */, %8
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6225929 /* reguse:VReg_128 */, %9
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %10
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %11
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40042505 /* reguse:VReg_512 */, %7
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 19464201 /* reguse:VReg_256 */, %8
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, %9
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %10
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %11
$agpr1 = COPY %14
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, killed $agpr1
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, killed $agpr1
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir
index 1b09f5d..ad490f8 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir
@@ -41,9 +41,9 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
- ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:AV_64_Align2 */, [[V_MFMA_F64_4X4X4F64_e64_]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]]
+ ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
%1:av_64_align2 = COPY $vgpr0_vgpr1
@@ -51,7 +51,7 @@ body: |
%3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
%4:vreg_128_align2 = COPY %3
%5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:VReg_64_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, %5
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
index d7b713a..0b4e662 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -30,7 +30,7 @@ body: |
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -172,7 +172,7 @@ body: |
; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]].sub2_sub3:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -183,7 +183,7 @@ body: |
undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4.sub2_sub3
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -208,7 +208,7 @@ body: |
; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]].sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]].sub2
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -219,7 +219,7 @@ body: |
undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub1:areg_128_align2 = COPY %4.sub2
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
index 57f611b..dcf3b8b 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
@@ -17,7 +17,7 @@ body: |
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -26,7 +26,7 @@ body: |
%3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
@@ -47,7 +47,7 @@ body: |
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -56,7 +56,7 @@ body: |
%3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
@@ -79,7 +79,7 @@ body: |
; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -90,7 +90,7 @@ body: |
%other_use:vreg_64_align2 = COPY %4.sub0_sub1
%5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
%6:areg_64_align2 = COPY %5
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %6:areg_64_align2
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %6:areg_64_align2
GLOBAL_STORE_DWORDX2 %0, %6, 0, 0, implicit $exec :: (store (s64), addrspace 1)
SI_RETURN
...
@@ -114,7 +114,7 @@ body: |
; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]].sub0_sub1:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -126,7 +126,7 @@ body: |
undef %5.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4, 0, 0, 0, implicit $mode, implicit $exec
%6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
%7:areg_64_align2 = COPY %6
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %7
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %7
GLOBAL_STORE_DWORDX2 %0, %7, 0, 0, implicit $exec :: (store (s64), addrspace 1)
SI_RETURN
@@ -151,7 +151,7 @@ body: |
; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -163,7 +163,7 @@ body: |
%other_use:vreg_64_align2 = COPY %5.sub0_sub1
%6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
undef %8.sub0_sub1:areg_128_align2 = COPY %6
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %8:areg_128_align2
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %8:areg_128_align2
GLOBAL_STORE_DWORDX4 %0, %8, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -231,7 +231,7 @@ body: |
; CHECK-NEXT: dead %other_use1:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3
; CHECK-NEXT: dead %other_use2:vreg_64 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub1_sub2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -245,7 +245,7 @@ body: |
%other_use1:vreg_64_align2 = COPY %4.sub2_sub3
%other_use2:vreg_64 = COPY %4.sub1_sub2
%6:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %6:areg_128_align2
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %6:areg_128_align2
GLOBAL_STORE_DWORDX4 %0, %6, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
index 335d58c..a18847b 100644
--- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
@@ -324,11 +324,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom:
; SDAG: ; %bb.0: ; %entry
; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1]
-; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
+; SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3
; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -350,10 +348,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; SDAG-NEXT: s_cbranch_execz .LBB21_2
; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
-; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -367,12 +364,12 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
;
; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v0, src_flat_scratch_base_hi
+; GISEL-NEXT: v_mov_b32_e32 v2, v0
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GISEL-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 3, s[0:1]
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-NEXT: v_xor_b32_e32 v0, v5, v0
+; GISEL-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo
@@ -394,11 +391,10 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
; GISEL-NEXT: s_cbranch_execz .LBB21_2
; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
-; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GISEL-NEXT: v_sub_nc_u32_e32 v0, v4, v0
+; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GISEL-NEXT: s_wait_loadcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll
index b5bb68e..e0ea08d 100644
--- a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll
@@ -97,9 +97,9 @@ entry:
define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
; GCN-LABEL: s_load_b64_idxprom:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b64 s[4:5], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GCN-NEXT: ; return to shader part epilog
entry:
%idxprom = zext i32 %idx to i64
@@ -111,10 +111,10 @@ entry:
define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
; GCN-LABEL: s_load_b96_idxprom:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_mov_b32_e32 v2, s6
; GCN-NEXT: ; return to shader part epilog
entry:
%idxprom = zext i32 %idx to i64
@@ -126,10 +126,10 @@ entry:
define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
; GCN-LABEL: s_load_b128_idxprom:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GCN-NEXT: ; return to shader part epilog
entry:
%idxprom = zext i32 %idx to i64
@@ -141,12 +141,12 @@ entry:
define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
; GCN-LABEL: s_load_b256_idxprom:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
; GCN-NEXT: ; return to shader part epilog
entry:
%idxprom = zext i32 %idx to i64
@@ -158,16 +158,16 @@ entry:
define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
; GCN-LABEL: s_load_b512_idxprom:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
+; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13
+; GCN-NEXT: v_dual_mov_b32 v10, s14 :: v_dual_mov_b32 v11, s15
+; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17
+; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19
; GCN-NEXT: ; return to shader part epilog
entry:
%idxprom = zext i32 %idx to i64
@@ -275,11 +275,11 @@ entry:
define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
; GCN-LABEL: s_load_b64_idxprom_range:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_load_b32 s4, s[0:1], 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b64 s[2:3], s[0:1], s4 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GCN-NEXT: ; return to shader part epilog
entry:
%idx = load i32, ptr addrspace(4) %p, align 4, !range !0
@@ -294,10 +294,10 @@ define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_mov_b32_e32 v2, s6
; GCN-NEXT: ; return to shader part epilog
entry:
%idx = load i32, ptr addrspace(4) %p, align 4, !range !0
@@ -312,10 +312,10 @@ define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GCN-NEXT: ; return to shader part epilog
entry:
%idx = load i32, ptr addrspace(4) %p, align 4, !range !0
@@ -330,12 +330,12 @@ define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
; GCN-NEXT: ; return to shader part epilog
entry:
%idx = load i32, ptr addrspace(4) %p, align 4, !range !0
@@ -350,16 +350,16 @@ define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 offset:0x0 scale_offset
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
+; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13
+; GCN-NEXT: v_dual_mov_b32 v10, s14 :: v_dual_mov_b32 v11, s15
+; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17
+; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19
; GCN-NEXT: ; return to shader part epilog
entry:
%idx = load i32, ptr addrspace(4) %p, align 4, !range !0
diff --git a/llvm/test/CodeGen/AMDGPU/true16-fold.mir b/llvm/test/CodeGen/AMDGPU/true16-fold.mir
index 93cc12f..9484417 100644
--- a/llvm/test/CodeGen/AMDGPU/true16-fold.mir
+++ b/llvm/test/CodeGen/AMDGPU/true16-fold.mir
@@ -57,6 +57,7 @@ body: |
%4:vgpr_16 = COPY %3:sgpr_lo16
%5:vgpr_32 = V_ALIGNBIT_B32_t16_e64 0, %0:sreg_32, 0, killed %1:sreg_32, 0, killed %4:vgpr_16, 0, 0, implicit $exec
S_ENDPGM 0, implicit %5
+...
---
name: fold_16bit_madmix_clamp
@@ -207,3 +208,27 @@ body: |
$vgpr0 = COPY %4
S_ENDPGM 0, implicit $vgpr0
...
+
+---
+name: fold_imm16_across_reg_sequence
+tracksRegLiveness: true
+registers:
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: fold_imm16_across_reg_sequence
+ ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B16_t16_e64_:%[0-9]+]]:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B16_t16_e64_1:%[0-9]+]]:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B16_t16_e64_]], %subreg.lo16, [[V_MOV_B16_t16_e64_1]], %subreg.hi16
+ ; CHECK-NEXT: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_MAX_F32_e64 0, -1, 0, -1, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_MAX_F32_e64_]]
+ ; CHECK-NEXT: S_ENDPGM 0, implicit $vgpr0
+ %0:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ %1:vgpr_16 = V_MOV_B16_t16_e64 0, -1, 0, implicit $exec
+ %2:vgpr_32 = REG_SEQUENCE %0, %subreg.lo16, %1, %subreg.hi16
+ %3:vgpr_32 = nofpexcept V_MAX_F32_e64 0, %2, 0, %2, 0, 0, implicit $mode, implicit $exec
+ $vgpr0 = COPY %3
+ S_ENDPGM 0, implicit $vgpr0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
index f2ecfe8..3d74b17 100644
--- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -17,16 +17,16 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3
;
; GFX1250-LABEL: v_ashr_pk_i8_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_and_b32 s2, s2, 31
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mov_b32_e32 v0, s2
; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0
-; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7]
; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
@@ -58,16 +58,16 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3
;
; GFX1250-LABEL: v_ashr_pk_u8_i32:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_and_b32 s2, s2, 31
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mov_b32_e32 v0, s2
; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0
-; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7]
; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
diff --git a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
index af8b9e7..6fe99d8 100644
--- a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
@@ -520,6 +520,7 @@ body: |
; GCN-NEXT: GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr4_vgpr5, 16, 0, implicit $exec
; GCN-NEXT: S_WAIT_KMCNT 0
; GCN-NEXT: $sgpr2 = S_ADD_I32 $sgpr0, 100, implicit-def $scc
+ ; GCN-NEXT: S_WAIT_XCNT 0
; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 20, implicit $exec
$sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 0, 0 :: (load (s64), addrspace 4)
$vgpr0 = V_MOV_B32_e32 1, implicit $exec
@@ -921,7 +922,6 @@ body: |
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
...
-# FIXME: Missing S_WAIT_XCNT before overwriting vgpr0.
---
name: wait_kmcnt_with_outstanding_vmem
tracksRegLiveness: true
@@ -937,6 +937,7 @@ body: |
; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
; GCN-NEXT: S_WAIT_KMCNT 0
; GCN-NEXT: $sgpr2 = S_MOV_B32 $sgpr2
+ ; GCN-NEXT: S_WAIT_XCNT 0
; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
$vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
@@ -944,7 +945,6 @@ body: |
$vgpr0 = V_MOV_B32_e32 0, implicit $exec
...
-# FIXME: Missing S_WAIT_XCNT before overwriting sgpr0.
---
name: wait_loadcnt_with_outstanding_smem
tracksRegLiveness: true
@@ -960,6 +960,7 @@ body: |
; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
; GCN-NEXT: S_WAIT_LOADCNT 0
; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr2, implicit $exec
+ ; GCN-NEXT: S_WAIT_XCNT 0
; GCN-NEXT: $sgpr0 = S_MOV_B32 0
$vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
@@ -967,7 +968,6 @@ body: |
$sgpr0 = S_MOV_B32 0
...
-# TODO: Unnecessary wait before overwriting vgpr0.
---
name: overwrite_vgpr_after_smem
tracksRegLiveness: true
@@ -981,14 +981,12 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
- ; GCN-NEXT: S_WAIT_XCNT 0
; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
$vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
$vgpr0 = V_MOV_B32_e32 0, implicit $exec
...
-# TODO: Unnecessary wait before overwriting sgpr0.
---
name: overwrite_sgpr_after_vmem
tracksRegLiveness: true
@@ -1002,7 +1000,6 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
- ; GCN-NEXT: S_WAIT_XCNT 0
; GCN-NEXT: $sgpr0 = S_MOV_B32 0
$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
$vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll
index a392692..6636eb5 100644
--- a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll
@@ -211,38 +211,39 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac
; GFX1250-SDAG-LABEL: workgroup_id_xyz:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_bfe_u32 s0, ttmp6, 0x40014
-; GFX1250-SDAG-NEXT: s_lshr_b32 s6, ttmp7, 16
-; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s0, 1
+; GFX1250-SDAG-NEXT: s_lshr_b32 s8, ttmp7, 16
+; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s0, 1
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-SDAG-NEXT: s_mul_i32 s4, s8, s9
; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40010
-; GFX1250-SDAG-NEXT: s_mul_i32 s7, s6, s7
-; GFX1250-SDAG-NEXT: s_bfe_u32 s8, ttmp6, 0x40008
+; GFX1250-SDAG-NEXT: s_bfe_u32 s5, ttmp6, 0x40008
; GFX1250-SDAG-NEXT: s_and_b32 s10, ttmp7, 0xffff
; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, 1
; GFX1250-SDAG-NEXT: s_bfe_u32 s11, ttmp6, 0x4000c
-; GFX1250-SDAG-NEXT: s_add_co_i32 s8, s8, s7
-; GFX1250-SDAG-NEXT: s_mul_i32 s7, s10, s9
+; GFX1250-SDAG-NEXT: s_add_co_i32 s5, s5, s4
+; GFX1250-SDAG-NEXT: s_mul_i32 s4, s10, s9
; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40004
; GFX1250-SDAG-NEXT: s_add_co_i32 s11, s11, 1
-; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s7
-; GFX1250-SDAG-NEXT: s_and_b32 s7, ttmp6, 15
+; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s4
+; GFX1250-SDAG-NEXT: s_and_b32 s4, ttmp6, 15
; GFX1250-SDAG-NEXT: s_mul_i32 s11, ttmp9, s11
; GFX1250-SDAG-NEXT: s_getreg_b32 s12, hwreg(HW_REG_IB_STS2, 6, 4)
-; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s7, s11
+; GFX1250-SDAG-NEXT: s_add_co_i32 s4, s4, s11
; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s12, 0
-; GFX1250-SDAG-NEXT: s_cselect_b32 s7, ttmp9, s7
+; GFX1250-SDAG-NEXT: s_cselect_b32 s4, ttmp9, s4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s7
-; GFX1250-SDAG-NEXT: s_cselect_b32 s7, s10, s9
-; GFX1250-SDAG-NEXT: s_cselect_b32 s6, s6, s8
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s6
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
+; GFX1250-SDAG-NEXT: s_cselect_b32 s4, s10, s9
+; GFX1250-SDAG-NEXT: s_cselect_b32 s5, s8, s5
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x2
; GFX1250-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX1250-SDAG-NEXT: global_store_b32 v0, v2, s[2:3]
-; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[4:5]
+; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[6:7]
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: workgroup_id_xyz:
@@ -250,39 +251,40 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac
; GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x4000c
; GFX1250-GISEL-NEXT: s_and_b32 s1, ttmp6, 15
; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1
-; GFX1250-GISEL-NEXT: s_getreg_b32 s6, hwreg(HW_REG_IB_STS2, 6, 4)
+; GFX1250-GISEL-NEXT: s_getreg_b32 s8, hwreg(HW_REG_IB_STS2, 6, 4)
; GFX1250-GISEL-NEXT: s_mul_i32 s0, ttmp9, s0
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-GISEL-NEXT: s_add_co_i32 s1, s1, s0
-; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0
-; GFX1250-GISEL-NEXT: s_cselect_b32 s7, ttmp9, s1
+; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0
+; GFX1250-GISEL-NEXT: s_cselect_b32 s9, ttmp9, s1
; GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x40010
-; GFX1250-GISEL-NEXT: s_and_b32 s8, ttmp7, 0xffff
+; GFX1250-GISEL-NEXT: s_and_b32 s10, ttmp7, 0xffff
; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1
-; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40004
-; GFX1250-GISEL-NEXT: s_mul_i32 s10, s8, s0
+; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40004
+; GFX1250-GISEL-NEXT: s_mul_i32 s12, s10, s0
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x10
+; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s12
+; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s9
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
-; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, s10
-; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s7
-; GFX1250-GISEL-NEXT: s_cselect_b32 s8, s8, s9
-; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40014
+; GFX1250-GISEL-NEXT: s_cselect_b32 s4, s10, s11
+; GFX1250-GISEL-NEXT: s_bfe_u32 s5, ttmp6, 0x40014
; GFX1250-GISEL-NEXT: s_lshr_b32 s10, ttmp7, 16
-; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, 1
+; GFX1250-GISEL-NEXT: s_add_co_i32 s5, s5, 1
; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40008
-; GFX1250-GISEL-NEXT: s_mul_i32 s9, s10, s9
+; GFX1250-GISEL-NEXT: s_mul_i32 s5, s10, s5
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s9
-; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0
-; GFX1250-GISEL-NEXT: s_cselect_b32 s6, s10, s11
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v3, s6
+; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s5
+; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0
+; GFX1250-GISEL-NEXT: s_cselect_b32 s5, s10, s11
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x2
; GFX1250-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1250-GISEL-NEXT: global_store_b32 v1, v2, s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[4:5]
+; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[6:7]
; GFX1250-GISEL-NEXT: s_endpgm
; GFX12-LABEL: workgroup_id_xyz:
; GFX12: ; %bb.0:
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000..0f84b76
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: bx lr
+ %shl = shl nsw i32 -1, %y
+ %and = and i32 %shl, %x
+ ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: and.w r0, r0, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shl = shl nsw i64 -1, %y
+ %and = and i64 %shl, %x
+ ret i64 %and
+}
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
new file mode 100644
index 0000000..77deaa5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -0,0 +1,4591 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+; Patterns:
+; a) (x >> start) & (1 << nbits) - 1
+; b) (x >> start) & ~(-1 << nbits)
+; c) (x >> start) & (-1 >> (32 - y))
+; d) (x >> start) << (32 - y) >> (32 - y)
+; are equivalent.
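+;
+; A minimal worked example (illustrative values only, not part of the patch's
+; generated checks): with x = 0xABCD1234, start = 8, and nbits = y = 12, each
+; pattern extracts the 12 bits starting at bit 8:
+;   a) (0xABCD1234 >> 8) & ((1 << 12) - 1)         = 0x00ABCD12 & 0xFFF = 0xD12
+;   b) (0xABCD1234 >> 8) & ~(-1 << 12)              = 0x00ABCD12 & 0xFFF = 0xD12
+;   c) (0xABCD1234 >> 8) & (0xFFFFFFFF >> (32 - 12)) = 0x00ABCD12 & 0xFFF = 0xD12
+;   d) ((0xABCD1234 >> 8) << 20) >> 20  (logical)    = 0xD1200000 >> 20  = 0xD12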
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a0_arithmetic:
+; V7M: @ %bb.0:
+; V7M-NEXT: asrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a0_arithmetic:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, asr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a0_arithmetic:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: asrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a0_arithmetic:
+; V6M: @ %bb.0:
+; V6M-NEXT: asrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = ashr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: lsrs r3, r1
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: lsrs r3, r1
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r4, r0
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a0_arithmetic:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: asrpl.w r0, r1, r4
+; V7M-NEXT: asr.w r2, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: asrpl r2, r1, #31
+; V7M-NEXT: and.w r1, r12, r2
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0_arithmetic:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: asr r2, r1, r2
+; V7A-NEXT: asrpl r0, r1, r3
+; V7A-NEXT: asrpl r2, r1, #31
+; V7A-NEXT: and r0, r4, r0
+; V7A-NEXT: and r1, r12, r2
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0_arithmetic:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: asrpl.w r0, r1, r4
+; V7A-T-NEXT: asr.w r2, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: asrpl r2, r1, #31
+; V7A-T-NEXT: and.w r1, r12, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0_arithmetic:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_lasr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = ashr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r4, r3, #32
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: subs.w r12, r3, #32
+; V7M-NEXT: lsl.w r3, lr, r3
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: rsb r12, r3, #32
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: subs r4, r3, #32
+; V7A-NEXT: lsl r3, lr, r3
+; V7A-NEXT: lsr r12, lr, r12
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: lslpl r12, lr, r4
+; V7A-NEXT: rsb r4, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: subs r3, r3, #1
+; V7A-NEXT: sbc r12, r12, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r4
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r3, #32
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: subs.w r12, r3, #32
+; V7A-T-NEXT: lsl.w r3, lr, r3
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: lsr.w r0, r0, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: mov r2, r3
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: rsb.w r1, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r1, lr, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs.w lr, r3, #1
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: sbc r12, r1, #0
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r0, r0, lr
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r6, lr}
+; V7A-NEXT: push {r4, r5, r6, lr}
+; V7A-NEXT: ldr r1, [sp, #16]
+; V7A-NEXT: mov r3, #1
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r5, [r0, #4]
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: lsl r1, r3, r1
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lslpl r0, r3, r4
+; V7A-NEXT: subs r1, r1, #1
+; V7A-NEXT: sbc r3, r0, #0
+; V7A-NEXT: lsr r0, r6, r2
+; V7A-NEXT: rsb r6, r2, #32
+; V7A-NEXT: orr r0, r0, r5, lsl r6
+; V7A-NEXT: subs r6, r2, #32
+; V7A-NEXT: lsrpl r0, r5, r6
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: lsr r1, r5, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: movs r3, #1
+; V7A-T-NEXT: ldrd lr, r1, [r0]
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r0, r12, #32
+; V7A-T-NEXT: lsr.w r4, r3, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, r3, r0
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsr.w r3, lr, r2
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r3, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #4
+; V6M-NEXT: sub sp, #4
+; V6M-NEXT: str r2, [sp] @ 4-byte Spill
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #24]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: subs r4, r0, #1
+; V6M-NEXT: sbcs r6, r7
+; V6M-NEXT: ldm r5!, {r0, r1}
+; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: ands r1, r6
+; V6M-NEXT: add sp, #4
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs.w lr, r2, #1
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: sbc r12, r3, #0
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r0, r0, lr
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r6, lr}
+; V7A-NEXT: push {r4, r5, r6, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r3, #1
+; V7A-NEXT: ldr r5, [r0, #4]
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl r0, r3, r4
+; V7A-NEXT: subs r3, r2, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: lsr r2, r5, r1
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r2, r0, r2
+; V7A-NEXT: lsr r0, r6, r1
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: orr r0, r0, r5, lsl r1
+; V7A-NEXT: mov r1, r2
+; V7A-NEXT: lsrpl r0, r5, r4
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: lsl.w r2, lr, r2
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: ldrd r12, r0, [r0]
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs.w lr, r2, #1
+; V7A-T-NEXT: sbc r2, r4, #0
+; V7A-T-NEXT: lsr.w r4, r0, r1
+; V7A-T-NEXT: subs.w r3, r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r4, #0
+; V7A-T-NEXT: and.w r2, r2, r4
+; V7A-T-NEXT: rsb.w r4, r1, #32
+; V7A-T-NEXT: lsr.w r1, r12, r1
+; V7A-T-NEXT: lsl.w r4, r0, r4
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r1, r0, r3
+; V7A-T-NEXT: and.w r0, lr, r1
+; V7A-T-NEXT: mov r1, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #4
+; V6M-NEXT: sub sp, #4
+; V6M-NEXT: str r1, [sp] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: subs r4, r0, #1
+; V6M-NEXT: sbcs r5, r7
+; V6M-NEXT: ldm r6!, {r0, r1}
+; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: add sp, #4
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r0, r4
+; V7A-NEXT: and r1, r1, r12
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsls r2, r1
+; V7M-NEXT: subs r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r1, r2, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mov r1, #1
+; V7A-NEXT: lsl r1, r1, r12
+; V7A-NEXT: subs r2, r12, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: sub r1, r1, #1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a1:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: add r12, r3, lr, lsl r12
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r12, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %truncshifted
+ ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a2:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: add r12, r3, lr, lsl r12
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r12, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %zextmask = zext i32 %mask to i64
+ %masked = and i64 %zextmask, %shifted
+ %truncmasked = trunc i64 %masked to i32
+ ret i32 %truncmasked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #0
+; V6M-NEXT: mvns r3, r3
+; V6M-NEXT: lsls r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bics r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #0
+; V6M-NEXT: mvns r3, r3
+; V6M-NEXT: lsls r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bics r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #16]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r5, r0, r3
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: itt pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bic.w r0, r5, r0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsr.w r12, r0, r2
+; V7M-NEXT: rsb.w r0, r2, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r1, r0
+; V7M-NEXT: lsr.w r0, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: subs.w r1, r3, #32
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r2, r1
+; V7M-NEXT: bic.w r1, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bic.w r0, r12, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r12, r0, r2
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: orr r12, r12, r1, lsl r0
+; V7A-NEXT: subs r0, r2, #32
+; V7A-NEXT: lsrpl r12, r1, r0
+; V7A-NEXT: lsr r0, r1, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: subs r1, r3, #32
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: lsl r3, r2, r3
+; V7A-NEXT: lslpl r2, r2, r1
+; V7A-NEXT: bic r1, r0, r2
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: bic r0, r12, r3
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsr.w r12, r0, r2
+; V7A-T-NEXT: rsb.w r0, r2, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r12, r12, r0
+; V7A-T-NEXT: subs.w r0, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r12, r1, r0
+; V7A-T-NEXT: lsr.w r0, r1, r2
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r1, r3, #32
+; V7A-T-NEXT: lsl.w r3, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r2, r1
+; V7A-T-NEXT: bic.w r1, r0, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: bic.w r0, r12, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r4, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r6, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r1, r3, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: lsl.w r2, r3, r12
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r12, r0, [r0]
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w lr, r0, r3
+; V7M-NEXT: lsr.w r3, r12, r1
+; V7M-NEXT: orr.w r12, r3, lr
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r0, r3
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r1
+; V7M-NEXT: bic.w r1, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: bic.w r0, r12, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldm r0, {r0, r3}
+; V7A-NEXT: lsr r12, r0, r1
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: orr r12, r12, r3, lsl r0
+; V7A-NEXT: subs r0, r1, #32
+; V7A-NEXT: lsrpl r12, r3, r0
+; V7A-NEXT: lsr r0, r3, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: subs r1, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r1
+; V7A-NEXT: bic r1, r0, r3
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r12, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: ldrd r12, r3, [r0]
+; V7A-T-NEXT: rsb.w r0, r1, #32
+; V7A-T-NEXT: lsl.w lr, r3, r0
+; V7A-T-NEXT: lsr.w r0, r12, r1
+; V7A-T-NEXT: orr.w r12, r0, lr
+; V7A-T-NEXT: subs.w r0, r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r12, r3, r0
+; V7A-T-NEXT: lsr.w r0, r3, r1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r1
+; V7A-T-NEXT: bic.w r1, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: bic.w r0, r12, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r4, r2
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r6, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #16]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r5, r0, r3
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: itt pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bic.w r0, r5, r0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
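+; Illustrative note with hypothetical inputs: for %val = 0x123456789, %numskipbits = 4
+; and %numlowbits = 8, the 64-bit shift gives 0x12345678, the mask is 0xFF, and the
+; extracted value is 0x78 regardless of whether the trunc to i32 is applied after the
+; 64-bit mask (b0), before a 32-bit mask (b1), or to a zero-extended 32-bit mask (b2).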
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsls r2, r1
+; V7M-NEXT: subs r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: lsl r1, r1, r12
+; V7A-NEXT: subs r2, r12, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: bic r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsr.w r12, r0, r2
+; V7A-T-NEXT: rsb.w r0, r2, #32
+; V7A-T-NEXT: ldrb.w r3, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: subs.w r2, r3, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r2, [r1]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %widenumlowbits = zext i8 %numlowbits to i64
+ %notmask = shl nsw i64 -1, %widenumlowbits
+ %mask = xor i64 %notmask, -1
+ %wideres = and i64 %shiftedval, %mask
+ %res = trunc i64 %wideres to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r12
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldrb.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r1, [r1]
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: pop {r7, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %truncshiftedval = trunc i64 %shiftedval to i32
+ %widenumlowbits = zext i8 %numlowbits to i32
+ %notmask = shl nsw i32 -1, %widenumlowbits
+ %mask = xor i32 %notmask, -1
+ %res = and i32 %truncshiftedval, %mask
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b2:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r12
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldrb.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r1, [r1]
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: pop {r7, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %widenumlowbits = zext i8 %numlowbits to i32
+ %notmask = shl nsw i32 -1, %widenumlowbits
+ %mask = xor i32 %notmask, -1
+ %zextmask = zext i32 %mask to i64
+ %wideres = and i64 %shiftedval, %zextmask
+ %res = trunc i64 %wideres to i32
+ ret i32 %res
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit
+; ---------------------------------------------------------------------------- ;
+
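+; As the IR below shows, pattern c builds the mask by right-shifting an all-ones value
+; by (32 - numlowbits) instead of inverting a left-shifted one. A minimal sketch with
+; hypothetical constants (illustrative only, not referenced by any CHECK lines):
+define i32 @bextr32_c_worked_example() nounwind {
+  %shifted = lshr i32 43981, 4         ; 0xABCD >> 4 = 0xABC
+  %numhighbits = sub i32 32, 8         ; 32 - numlowbits(8) = 24
+  %mask = lshr i32 -1, %numhighbits    ; 0xFFFFFFFF >> 24 = 0x000000FF
+  %masked = and i32 %mask, %shifted    ; 0xABC & 0xFF = 0xBC
+  ret i32 %masked
+}
+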
+define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r5, r1, r2
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r4, r12, #64
+; V7A-NEXT: rsbs lr, r12, #32
+; V7A-NEXT: lsr r4, r3, r4
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: and r12, r4, r5
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: mov r1, r12
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #-1
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r2, lr, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: lsr.w r12, r0, r2
+; V7M-NEXT: rsb.w r0, r2, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r1, r0
+; V7M-NEXT: rsb.w r0, r3, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: uxtb r0, r0
+; V7M-NEXT: subs.w lr, r0, #32
+; V7M-NEXT: lsr.w r2, r3, r0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: and.w r0, r3, r12
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: lsr lr, r0, r12
+; V7A-NEXT: rsb r0, r12, #32
+; V7A-NEXT: orr r4, lr, r1, lsl r0
+; V7A-NEXT: mvn lr, #31
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lsrpl r4, r1, r2
+; V7A-NEXT: rsb r2, r3, #64
+; V7A-NEXT: lsr r1, r1, r12
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsr r0, r3, r12
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: and r1, r0, r1
+; V7A-NEXT: lsrpl r3, r3, r2
+; V7A-NEXT: and r0, r3, r4
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: lsr.w lr, r0, r12
+; V7A-T-NEXT: rsb.w r0, r12, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r4, lr, r0
+; V7A-T-NEXT: mvn lr, #31
+; V7A-T-NEXT: uxtab r2, lr, r2
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r4, r1, r2
+; V7A-T-NEXT: rsb.w r2, r3, #64
+; V7A-T-NEXT: lsr.w r1, r1, r12
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: uxtab r2, lr, r2
+; V7A-T-NEXT: lsr.w r0, r3, r12
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: and.w r1, r1, r0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: and.w r0, r3, r4
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r3
+; V6M-NEXT: uxtb r2, r2
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r5
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r6
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r8, lr}
+; V7A-NEXT: push {r4, r6, r8, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r6, r12, #64
+; V7A-NEXT: ldr r8, [r0]
+; V7A-NEXT: mvn r0, #0
+; V7A-NEXT: rsbs r1, r12, #32
+; V7A-NEXT: lsr r6, r0, r6
+; V7A-NEXT: lsr r4, r3, r2
+; V7A-NEXT: lsrpl r0, r0, r1
+; V7A-NEXT: movwpl r6, #0
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: and r1, r6, r4
+; V7A-NEXT: lsr r6, r8, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: orr r2, r6, r3, lsl r2
+; V7A-NEXT: lsrpl r2, r3, r12
+; V7A-NEXT: and r0, r0, r2
+; V7A-NEXT: pop {r4, r6, r8, pc}
+;
+; V7A-T-LABEL: bextr64_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r1, r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r12, r12, #32
+; V7A-T-NEXT: lsr.w r2, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, r12
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsr.w r12, r0, r1
+; V7M-NEXT: rsb.w r0, r1, #32
+; V7M-NEXT: lsl.w r0, r3, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r3, r0
+; V7M-NEXT: rsb.w r0, r2, #64
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: uxtb r0, r0
+; V7M-NEXT: subs.w lr, r0, #32
+; V7M-NEXT: lsr.w r2, r3, r0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: and.w r0, r3, r12
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: lsr r12, r4, r0
+; V7A-NEXT: rsb r4, r0, #32
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: orr lr, r12, r3, lsl r4
+; V7A-NEXT: mvn r12, #31
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsrpl lr, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: uxtab r4, r12, r1
+; V7A-NEXT: lsr r2, r3, r2
+; V7A-NEXT: cmp r4, #0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r1, r2, r0
+; V7A-NEXT: lsrpl r3, r3, r4
+; V7A-NEXT: and r0, r3, lr
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: rsb.w r3, r0, #32
+; V7A-T-NEXT: lsl.w r4, lr, r3
+; V7A-T-NEXT: lsr.w r3, r12, r0
+; V7A-T-NEXT: orr.w r5, r3, r4
+; V7A-T-NEXT: mvn r12, #31
+; V7A-T-NEXT: uxtab r1, r12, r1
+; V7A-T-NEXT: lsr.w r0, lr, r0
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, lr, r1
+; V7A-T-NEXT: rsb.w r1, r2, #64
+; V7A-T-NEXT: mov.w r4, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: uxtb r2, r1
+; V7A-T-NEXT: uxtab r3, r12, r1
+; V7A-T-NEXT: lsr.w r2, r4, r2
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: and.w r1, r2, r0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r4, r3
+; V7A-T-NEXT: and.w r0, r4, r5
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r2
+; V6M-NEXT: ldr r4, [r0]
+; V6M-NEXT: ldr r3, [r0, #4]
+; V6M-NEXT: uxtb r2, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r5
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r6
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r5, r1, r2
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r4, r12, #64
+; V7A-NEXT: rsbs lr, r12, #32
+; V7A-NEXT: lsr r4, r3, r4
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: and r12, r5, r4
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: mov r1, r12
+; V7A-NEXT: and r0, r0, r3
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #-1
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r2, lr, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: rsbs.w r1, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r2, r1
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r3, [sp]
+; V7A-NEXT: rsbs r12, r3, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsrpl r3, r3, r12
+; V7A-NEXT: lsr r12, r0, r2
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r12, r1, lsl r0
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: rsbs.w r1, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r2, r1
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: ldr r0, [sp, #8]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %truncshifted
+ ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c2:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %zextmask = zext i32 %mask to i64
+ %masked = and i64 %zextmask, %shifted
+ %truncmasked = trunc i64 %masked to i32
+ ret i32 %truncmasked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
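+; Pattern d avoids the AND entirely: the high bits are cleared by shifting left by
+; (32 - numlowbits) and then logically shifting right by the same amount. A minimal
+; sketch with hypothetical constants (illustrative only, not referenced by any CHECK
+; lines):
+define i32 @bextr32_d_worked_example() nounwind {
+  %shifted = lshr i32 43981, 4                        ; 0xABCD >> 4 = 0xABC
+  %numhighbits = sub i32 32, 8                        ; 32 - numlowbits(8) = 24
+  %highbitscleared = shl i32 %shifted, %numhighbits   ; 0xABC << 24 = 0xBC000000 (the 0xA nibble is shifted out)
+  %masked = lshr i32 %highbitscleared, %numhighbits   ; 0xBC000000 >> 24 = 0xBC
+  ret i32 %masked
+}
+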
+define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %shifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %shifted, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %shifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %shifted, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: rsb.w r12, r3, #32
+; V7M-NEXT: lsls r1, r3
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r1, r1, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r2, r1, r12
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, lr
+; V7M-NEXT: lsr.w r1, r1, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsr r1, r2, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: rsb.w r3, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: uxtb.w lr, r2
+; V7M-NEXT: subs.w r2, lr, #32
+; V7M-NEXT: lsr.w r12, r0, lr
+; V7M-NEXT: rsb.w r0, lr, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: rsb.w r2, r3, #64
+; V7M-NEXT: lsr.w r1, r1, lr
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: sub.w r3, r2, #32
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orrs r1, r4
+; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: lsl.w r4, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: lsr lr, r0, r12
+; V7A-NEXT: rsb r0, r12, #32
+; V7A-NEXT: orr r0, lr, r1, lsl r0
+; V7A-NEXT: mvn lr, #31
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r2, r3, #64
+; V7A-NEXT: lsr r1, r1, r12
+; V7A-NEXT: uxtb r3, r2
+; V7A-NEXT: rsb r4, r3, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: lsr r5, r0, r4
+; V7A-NEXT: orr r1, r5, r1, lsl r3
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, r3
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r6, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r6, r7, lr}
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: rsb.w r6, r12, #32
+; V7A-T-NEXT: rsb.w r3, r3, #64
+; V7A-T-NEXT: lsr.w r0, r0, r12
+; V7A-T-NEXT: mvn r7, #31
+; V7A-T-NEXT: uxtab r2, r7, r2
+; V7A-T-NEXT: lsl.w r6, r1, r6
+; V7A-T-NEXT: lsr.w lr, r1, r12
+; V7A-T-NEXT: orrs r0, r6
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w lr, #0
+; V7A-T-NEXT: uxtb r5, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r5, #32
+; V7A-T-NEXT: uxtab r3, r7, r3
+; V7A-T-NEXT: lsl.w r4, lr, r5
+; V7A-T-NEXT: lsr.w r2, r0, r1
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: orr.w r2, r2, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r3
+; V7A-T-NEXT: lsl.w r0, r0, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: lsr.w r0, r0, r5
+; V7A-T-NEXT: orr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r2, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; V6M-LABEL: bextr64_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r3
+; V6M-NEXT: uxtb r2, r2
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r2, r2, r4
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %shifted, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: rsb.w r1, r12, #64
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: rsb.w r12, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsl.w r2, r2, r1
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r2, r2, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsr r1, r2, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r2, r3, r2
+; V7A-T-NEXT: rsb.w r1, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: rsb.w lr, r1, #32
+; V7A-T-NEXT: rsbs.w r3, r12, #32
+; V7A-T-NEXT: lsl.w r2, r2, r1
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r2, r2, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r3
+; V7A-T-NEXT: lsl.w r0, r0, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r2, lr
+; V7A-T-NEXT: lsr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r2, r1
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldrd r0, lr, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: lsr.w r12, r0, r1
+; V7M-NEXT: rsb.w r0, r1, #32
+; V7M-NEXT: lsr.w r1, lr, r1
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: lsl.w r0, lr, r0
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, lr, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: sub.w r3, r2, #32
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orrs r1, r4
+; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: lsl.w r4, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: lsr r12, r4, r0
+; V7A-NEXT: rsb r4, r0, #32
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: orr r4, r12, r3, lsl r4
+; V7A-NEXT: mvn r12, #31
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsrpl r4, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: rsb lr, r2, #32
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: lsr r5, r4, lr
+; V7A-NEXT: orr r3, r5, r0, lsl r2
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsl r0, r4, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lslpl r3, r4, r1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r3, lsl lr
+; V7A-NEXT: lsrpl r0, r3, r1
+; V7A-NEXT: lsr r1, r3, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r6, lr}
+; V7A-T-NEXT: push {r4, r5, r6, lr}
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: rsb.w r6, r0, #32
+; V7A-T-NEXT: lsr.w r3, lr, r0
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mvn r4, #31
+; V7A-T-NEXT: lsr.w r0, r12, r0
+; V7A-T-NEXT: uxtab r1, r4, r1
+; V7A-T-NEXT: lsl.w r6, lr, r6
+; V7A-T-NEXT: orrs r0, r6
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: uxtb r5, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, lr, r1
+; V7A-T-NEXT: rsb.w r1, r5, #32
+; V7A-T-NEXT: lsls r3, r5
+; V7A-T-NEXT: uxtab r2, r4, r2
+; V7A-T-NEXT: lsr.w r6, r0, r1
+; V7A-T-NEXT: orrs r3, r6
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r5
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsr.w r0, r0, r5
+; V7A-T-NEXT: orr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r3, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, r5, r6, pc}
+;
+; V6M-LABEL: bextr64_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r2
+; V6M-NEXT: ldr r5, [r0]
+; V6M-NEXT: ldr r3, [r0, #4]
+; V6M-NEXT: uxtb r2, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r2, r2, r4
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %shifted, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: rsb.w r12, r3, #32
+; V7M-NEXT: lsls r1, r3
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r1, r1, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r2, r1, r12
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, lr
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: rsb.w r3, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_d1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_d1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_d1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_d1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %truncshifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant
+; ---------------------------------------------------------------------------- ;
+
+; https://bugs.llvm.org/show_bug.cgi?id=38938
+define void @pr38938(ptr %a0, ptr %a1) nounwind {
+; V7M-LABEL: pr38938:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r1, [r1]
+; V7M-NEXT: ubfx r1, r1, #21, #10
+; V7M-NEXT: ldr.w r2, [r0, r1, lsl #2]
+; V7M-NEXT: adds r2, #1
+; V7M-NEXT: str.w r2, [r0, r1, lsl #2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: pr38938:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r1, [r1]
+; V7A-NEXT: ubfx r1, r1, #21, #10
+; V7A-NEXT: ldr r2, [r0, r1, lsl #2]
+; V7A-NEXT: add r2, r2, #1
+; V7A-NEXT: str r2, [r0, r1, lsl #2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: pr38938:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r1, [r1]
+; V7A-T-NEXT: ubfx r1, r1, #21, #10
+; V7A-T-NEXT: ldr.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT: adds r2, #1
+; V7A-T-NEXT: str.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: pr38938:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r1]
+; V6M-NEXT: lsrs r1, r1, #19
+; V6M-NEXT: ldr r2, .LCPI51_0
+; V6M-NEXT: ands r2, r1
+; V6M-NEXT: ldr r1, [r0, r2]
+; V6M-NEXT: adds r1, r1, #1
+; V6M-NEXT: str r1, [r0, r2]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI51_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp = load i64, ptr %a1, align 8
+ %tmp1 = lshr i64 %tmp, 21
+ %tmp2 = and i64 %tmp1, 1023
+ %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2
+ %tmp4 = load i32, ptr %tmp3, align 4
+ %tmp5 = add nsw i32 %tmp4, 1
+ store i32 %tmp5, ptr %tmp3, align 4
+ ret void
+}
+
+; The most canonical variant
+define i32 @c0_i32(i32 %arg) nounwind {
+; V7M-LABEL: c0_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #10
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c0_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #10
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c0_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #10
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c0_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ ret i32 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i32 @c1_i32(i32 %arg) nounwind {
+; V7M-LABEL: c1_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r1, #4092
+; V7M-NEXT: and.w r0, r1, r0, lsr #19
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c1_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r1, #4092
+; V7A-NEXT: and r0, r1, r0, lsr #19
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c1_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r1, #4092
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c1_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r0, #19
+; V6M-NEXT: ldr r0, .LCPI53_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI53_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 4092
+ ret i32 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i32 @c2_i32(i32 %arg) nounwind {
+; V7M-LABEL: c2_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r1, #4092
+; V7M-NEXT: and.w r0, r1, r0, lsr #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c2_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r1, #4092
+; V7A-NEXT: and r0, r1, r0, lsr #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c2_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r1, #4092
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c2_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r0, #17
+; V6M-NEXT: ldr r0, .LCPI54_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI54_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ %tmp2 = shl i32 %tmp1, 2
+ ret i32 %tmp2
+}
+
+; The mask covers the newly shifted-in bit
+define i32 @c4_i32_bad(i32 %arg) nounwind {
+; V7M-LABEL: c4_i32_bad:
+; V7M: @ %bb.0:
+; V7M-NEXT: mvn r1, #1
+; V7M-NEXT: and.w r0, r1, r0, lsr #19
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c4_i32_bad:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r1, #1
+; V7A-NEXT: and r0, r1, r0, lsr #19
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c4_i32_bad:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mvn r1, #1
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c4_i32_bad:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r0, #20
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 16382
+ ret i32 %tmp1
+}
+
+; i64
+
+; The most canonical variant
+define i64 @c0_i64(i64 %arg) nounwind {
+; V7M-LABEL: c0_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r1, #19, #10
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c0_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r1, #19, #10
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c0_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r1, #19, #10
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c0_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r1, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ ret i64 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i64 @c1_i64(i64 %arg) nounwind {
+; V7M-LABEL: c1_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r0, #4092
+; V7M-NEXT: and.w r0, r0, r1, lsr #19
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c1_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: and r0, r0, r1, lsr #19
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c1_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r0, #4092
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c1_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r1, #19
+; V6M-NEXT: ldr r0, .LCPI57_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI57_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 4092
+ ret i64 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i64 @c2_i64(i64 %arg) nounwind {
+; V7M-LABEL: c2_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r0, #4092
+; V7M-NEXT: and.w r0, r0, r1, lsr #17
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c2_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: and r0, r0, r1, lsr #17
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c2_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r0, #4092
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #17
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c2_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r1, #17
+; V6M-NEXT: ldr r0, .LCPI58_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI58_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ %tmp2 = shl i64 %tmp1, 2
+ ret i64 %tmp2
+}
+
+; The mask covers the newly shifted-in bit
+define i64 @c4_i64_bad(i64 %arg) nounwind {
+; V7M-LABEL: c4_i64_bad:
+; V7M: @ %bb.0:
+; V7M-NEXT: mvn r0, #1
+; V7M-NEXT: and.w r0, r0, r1, lsr #19
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c4_i64_bad:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r0, #1
+; V7A-NEXT: and r0, r0, r1, lsr #19
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c4_i64_bad:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mvn r0, #1
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c4_i64_bad:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1, #20
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 16382
+ ret i64 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant, storing the result afterwards.
+; ---------------------------------------------------------------------------- ;
+
+; i32
+
+; The most canonical variant
+define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c5_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #10
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c5_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #10
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c5_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #10
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c5_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: str r0, [r1]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ store i32 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c6_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #12
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c6_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #12
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c6_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #12
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c6_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: lsrs r0, r0, #20
+; V6M-NEXT: str r0, [r1]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 4095
+ store i32 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c7_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r2, #4092
+; V7M-NEXT: and.w r0, r2, r0, lsr #17
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c7_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r2, #4092
+; V7A-NEXT: and r0, r2, r0, lsr #17
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c7_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r2, #4092
+; V7A-T-NEXT: and.w r0, r2, r0, lsr #17
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c7_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r0, #17
+; V6M-NEXT: ldr r2, .LCPI62_0
+; V6M-NEXT: ands r2, r0
+; V6M-NEXT: str r2, [r1]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI62_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ %tmp2 = shl i32 %tmp1, 2
+ store i32 %tmp2, ptr %ptr
+ ret void
+}
+
+; i64
+
+; The most canonical variant
+define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c5_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: ubfx r1, r1, #19, #10
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c5_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r0, #0
+; V7A-NEXT: str r0, [r2, #4]
+; V7A-NEXT: ubfx r0, r1, #19, #10
+; V7A-NEXT: str r0, [r2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c5_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: ubfx r1, r1, #19, #10
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c5_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsls r1, r1, #3
+; V6M-NEXT: lsrs r1, r1, #22
+; V6M-NEXT: str r1, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ store i64 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c6_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: ubfx r1, r1, #19, #12
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c6_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r0, #0
+; V7A-NEXT: str r0, [r2, #4]
+; V7A-NEXT: ubfx r0, r1, #19, #12
+; V7A-NEXT: str r0, [r2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c6_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: ubfx r1, r1, #19, #12
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c6_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsls r1, r1, #1
+; V6M-NEXT: lsrs r1, r1, #20
+; V6M-NEXT: str r1, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 4095
+ store i64 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c7_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: movw r3, #4092
+; V7M-NEXT: and.w r1, r3, r1, lsr #17
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c7_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: mov r3, #0
+; V7A-NEXT: and r0, r0, r1, lsr #17
+; V7A-NEXT: stm r2, {r0, r3}
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c7_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: movw r3, #4092
+; V7A-T-NEXT: and.w r1, r3, r1, lsr #17
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c7_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsrs r1, r1, #17
+; V6M-NEXT: ldr r3, .LCPI65_0
+; V6M-NEXT: ands r3, r1
+; V6M-NEXT: str r3, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI65_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ %tmp2 = shl i64 %tmp1, 2
+ store i64 %tmp2, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll
new file mode 100644
index 0000000..b483793
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll
@@ -0,0 +1,2752 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+; Patterns:
+; a) x & (1 << nbits) - 1
+; b) x & ~(-1 << nbits)
+; c) x & (-1 >> (32 - nbits))
+; d) x << (32 - nbits) >> (32 - nbits)
+; are equivalent.
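+;
+; A hypothetical worked example (illustration only, not part of the
+; autogenerated checks): with x = 0b11011011 and nbits = 4, every form
+; keeps only the low 4 bits of x:
+;   a) x & ((1 << 4) - 1)        = 0b11011011 & 0b1111 = 0b1011
+;   b) x & ~(-1 << 4)            = 0b11011011 & 0b1111 = 0b1011
+;   c) x & (-1 >> (32 - 4))      = 0b11011011 & 0b1111 = 0b1011
+;   d) (x << (32 - 4)) >> (32 - 4), i.e. (x << 28) >> 28 = 0b1011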
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; Check that we don't throw away the vreg_width-1 mask if not using shifts
+define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a0_masked:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: and r2, r2, #63
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0_masked:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: and r2, r2, #63
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0_masked:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: and r2, r2, #63
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0_masked:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #63
+; V6M-NEXT: ands r2, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %numlowbits.masked = and i64 %numlowbits, 63
+ %onebit = shl i64 1, %numlowbits.masked
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: movs r3, #1
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: sbc r1, r1, #0
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r1, #1
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsr r0, r1, r0
+; V7A-NEXT: lslpl r0, r1, r4
+; V7A-NEXT: lsl r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: subs r2, r1, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: subs.w r0, r2, #32
+; V7A-T-NEXT: lsr.w r3, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r1, r0
+; V7A-T-NEXT: lsl.w r0, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r1, r3, #0
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r5, #0
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r2, r0, #1
+; V6M-NEXT: sbcs r1, r5
+; V6M-NEXT: ldm r4!, {r0, r3}
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r1, #32
+; V7M-NEXT: movs r3, #1
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: subs r3, r1, #1
+; V7M-NEXT: sbc r1, r2, #0
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: lsl r1, r2, r1
+; V7A-NEXT: lsr r0, r2, r0
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lslpl r0, r2, r4
+; V7A-NEXT: subs r2, r1, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r1, #32
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: subs.w r0, r1, #32
+; V7A-T-NEXT: lsr.w r3, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r2, r0
+; V7A-T-NEXT: lsl.w r0, r2, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r1, r3, #0
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r5, #0
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r2, r0, #1
+; V6M-NEXT: sbcs r1, r5
+; V6M-NEXT: ldm r4!, {r0, r3}
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r0, r2
+; V7A-NEXT: and r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r1, #-1
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsl.w r3, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r1, r12
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: bic.w r1, r2, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: subs r0, r2, #32
+; V7A-NEXT: lsl r2, r1, r2
+; V7A-NEXT: lslpl r1, r1, r0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r3, r1
+; V7A-NEXT: bic r0, r4, r2
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bzhi64_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: ldrd r0, r12, [r0]
+; V7A-T-NEXT: lsl.w r3, r1, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r1, r2
+; V7A-T-NEXT: bics r0, r3
+; V7A-T-NEXT: bic.w r1, r12, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: bics r2, r0
+; V6M-NEXT: bics r3, r1
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: lsl.w r3, r2, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, r12
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: subs r0, r1, #32
+; V7A-NEXT: lsl r4, r2, r1
+; V7A-NEXT: lslpl r2, r2, r0
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: bic r1, r3, r2
+; V7A-NEXT: bic r0, r6, r4
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: ldrd r0, r12, [r0]
+; V7A-T-NEXT: lsl.w r3, r2, r1
+; V7A-T-NEXT: subs r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r2, r1
+; V7A-T-NEXT: bics r0, r3
+; V7A-T-NEXT: bic.w r1, r12, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: bics r2, r0
+; V6M-NEXT: bics r3, r1
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsbs.w lr, r2, #32
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsbs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #64
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r2, r12, r2
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: and r1, r2, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w lr, r2, #32
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsr.w r2, r12, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: mvn r2, #31
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: uxtb r12, lr
+; V7A-NEXT: uxtab r2, r2, lr
+; V7A-NEXT: lsr r12, r3, r12
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: movwpl r12, #0
+; V7A-NEXT: lsrpl r3, r3, r2
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w lr, r2, #64
+; V7A-T-NEXT: mvn r2, #31
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: uxtb.w r12, lr
+; V7A-T-NEXT: uxtab r2, r2, lr
+; V7A-T-NEXT: lsr.w r12, r3, r12
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r2
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsbs.w r1, r2, #32
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r3, r1
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, lr}
+; V7A-NEXT: push {r5, lr}
+; V7A-NEXT: rsbs r1, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: ldm r0, {r0, r5}
+; V7A-NEXT: lsrpl r3, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: lsr r1, r12, r1
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r1, r1, r5
+; V7A-NEXT: pop {r5, pc}
+;
+; V7A-T-LABEL: bzhi64_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w r1, r2, #32
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: ldrd r0, lr, [r0]
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r1
+; V7A-T-NEXT: rsb.w r1, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r1, r12, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #64
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: subs.w r2, r1, #32
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r3, r2
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: rsb r1, r1, #64
+; V7A-NEXT: mvn r4, #31
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: uxtab r4, r4, r1
+; V7A-NEXT: lsr r0, r2, r0
+; V7A-NEXT: cmp r4, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: lsrpl r2, r2, r4
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r1, r1, #64
+; V7A-T-NEXT: mvn r3, #31
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: uxtab r3, r3, r1
+; V7A-T-NEXT: lsr.w r0, r2, r0
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: and.w r1, r0, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r2, r3
+; V7A-T-NEXT: and.w r0, r2, r12
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r1
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsbs.w lr, r2, #32
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsbs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #64
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r2, r12, r2
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r0, r0, r3
+; V7A-NEXT: and r1, r1, r2
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w lr, r2, #32
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsr.w r2, r12, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %val, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %val, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %val, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %val, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #64
+; V7M-NEXT: rsbs.w r2, r2, #32
+; V7M-NEXT: rsb.w lr, r3, #32
+; V7M-NEXT: lsl.w r12, r1, r3
+; V7M-NEXT: lsr.w r1, r0, lr
+; V7M-NEXT: orr.w r1, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r2
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r12, r1, lr
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: lsr.w r1, r1, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: rsbs r2, r2, #32
+; V7A-NEXT: rsb r12, lr, #32
+; V7A-NEXT: lsr r3, r0, r12
+; V7A-NEXT: orr r1, r3, r1, lsl lr
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, lr
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, lr
+; V7A-NEXT: orr r0, r0, r1, lsl r12
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #64
+; V7A-T-NEXT: rsbs.w r2, r2, #32
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r12, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %val, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsl.w r12, r1, r2
+; V7M-NEXT: lsr.w r1, r0, r3
+; V7M-NEXT: orr.w r1, r1, r12
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r12
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r12
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: uxtb r3, lr
+; V7A-NEXT: rsb r12, r3, #32
+; V7A-NEXT: lsr r2, r0, r12
+; V7A-NEXT: orr r1, r2, r1, lsl r3
+; V7A-NEXT: mvn r2, #31
+; V7A-NEXT: uxtab r2, r2, lr
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r1, lsl r12
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, r3
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r2, #64
+; V7A-T-NEXT: mvn r2, #31
+; V7A-T-NEXT: uxtb r3, r4
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: uxtab r2, r2, r4
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %val, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r1, r2, #64
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w lr, r1, #32
+; V7M-NEXT: rsbs.w r2, r2, #32
+; V7M-NEXT: lsl.w r12, r3, r1
+; V7M-NEXT: lsr.w r3, r0, lr
+; V7M-NEXT: orr.w r3, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r0, r2
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r12, r3, lr
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, r7, r11, lr}
+; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: rsb r3, r2, #64
+; V7A-NEXT: ldm r0, {r0, r7}
+; V7A-NEXT: rsb r1, r3, #32
+; V7A-NEXT: rsbs r2, r2, #32
+; V7A-NEXT: lsr r5, r0, r1
+; V7A-NEXT: orr r7, r5, r7, lsl r3
+; V7A-NEXT: lslpl r7, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r7, lsl r1
+; V7A-NEXT: lsr r1, r7, r3
+; V7A-NEXT: lsrpl r0, r7, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #64
+; V7A-T-NEXT: ldrd r0, r1, [r0]
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: rsbs.w r2, r2, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r12, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r4, r1, r2
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %val, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #64
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w r12, r2, r1
+; V7M-NEXT: lsr.w r2, r0, r3
+; V7M-NEXT: orr.w r2, r2, r12
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r0, r12
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, r7, r11, lr}
+; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: rsb r1, r1, #64
+; V7A-NEXT: ldm r0, {r0, r7}
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r5, r0, r3
+; V7A-NEXT: orr r7, r5, r7, lsl r2
+; V7A-NEXT: mvn r5, #31
+; V7A-NEXT: uxtab r1, r5, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lslpl r7, r0, r1
+; V7A-NEXT: lsl r0, r0, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r7, lsl r3
+; V7A-NEXT: lsrpl r0, r7, r1
+; V7A-NEXT: lsr r1, r7, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r1, #64
+; V7A-T-NEXT: ldrd r0, r2, [r0]
+; V7A-T-NEXT: mvn r1, #31
+; V7A-T-NEXT: uxtb r3, r4
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r2, r3
+; V7A-T-NEXT: uxtab r1, r1, r4
+; V7A-T-NEXT: lsr.w r2, r0, lr
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: orr.w r2, r2, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r1
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r2, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r1
+; V7A-T-NEXT: lsr.w r1, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r4, r1
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %val, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant mask
+; ---------------------------------------------------------------------------- ;
+
+; 32-bit
+
+define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask32:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %masked = and i32 %val, 2147483647
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask32_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 2147483647
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask16:
+; V7M: @ %bb.0:
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16:
+; V7A: @ %bb.0:
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, .LCPI41_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI41_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %masked = and i32 %val, 32767
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask16_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: ldr r0, .LCPI42_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI42_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 32767
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask8:
+; V7M: @ %bb.0:
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8:
+; V7A: @ %bb.0:
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %masked = and i32 %val, 127
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask8_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: movs r0, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 127
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask64:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r1, r1, #-1073741824
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r1, r1, #-1073741824
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r1, r1, #-1073741824
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #3
+; V6M-NEXT: lsls r2, r2, #30
+; V6M-NEXT: bics r1, r2
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 4611686018427387903
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask64_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: bic r1, r1, #-1073741824
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: bic r1, r1, #-1073741824
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldrd r0, r1, [r0]
+; V7A-T-NEXT: bic r1, r1, #-1073741824
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #3
+; V6M-NEXT: lsls r3, r1, #30
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: bics r1, r3
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 4611686018427387903
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask32:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 2147483647
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask32_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 2147483647
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask16:
+; V7M: @ %bb.0:
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16:
+; V7A: @ %bb.0:
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, .LCPI49_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI49_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %masked = and i64 %val, 32767
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask16_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: ldr r0, .LCPI50_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI50_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 32767
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask8:
+; V7M: @ %bb.0:
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8:
+; V7A: @ %bb.0:
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 127
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask8_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: movs r0, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 127
+ ret i64 %masked
+}
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat.ll b/llvm/test/CodeGen/ARM/fpclamptosat.ll
index 8ab56b2..a6f0a03 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat.ll
@@ -383,8 +383,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; SOFT-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; SOFT-LABEL: utest_f16i32:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: .save {r7, lr}
; SOFT-NEXT: push {r7, lr}
@@ -400,7 +400,7 @@ define i32 @utesth_f16i32(half %x) {
; SOFT-NEXT: .LBB7_2: @ %entry
; SOFT-NEXT: pop {r7, pc}
;
-; VFP2-LABEL: utesth_f16i32:
+; VFP2-LABEL: utest_f16i32:
; VFP2: @ %bb.0: @ %entry
; VFP2-NEXT: .save {r7, lr}
; VFP2-NEXT: push {r7, lr}
@@ -411,7 +411,7 @@ define i32 @utesth_f16i32(half %x) {
; VFP2-NEXT: vmov r0, s0
; VFP2-NEXT: pop {r7, pc}
;
-; FULL-LABEL: utesth_f16i32:
+; FULL-LABEL: utest_f16i32:
; FULL: @ %bb.0: @ %entry
; FULL-NEXT: vcvt.u32.f16 s0, s0
; FULL-NEXT: vmov r0, s0
@@ -3985,6 +3985,46 @@ entry:
ret i32 %spec.store.select7
}
+; i32 non-saturating
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; SOFT-LABEL: ustest_f16i32_nsat:
+; SOFT: @ %bb.0:
+; SOFT-NEXT: .save {r7, lr}
+; SOFT-NEXT: push {r7, lr}
+; SOFT-NEXT: uxth r0, r0
+; SOFT-NEXT: bl __aeabi_h2f
+; SOFT-NEXT: bl __aeabi_f2iz
+; SOFT-NEXT: asrs r1, r0, #31
+; SOFT-NEXT: ands r0, r1
+; SOFT-NEXT: asrs r1, r0, #31
+; SOFT-NEXT: bics r0, r1
+; SOFT-NEXT: pop {r7, pc}
+;
+; VFP2-LABEL: ustest_f16i32_nsat:
+; VFP2: @ %bb.0:
+; VFP2-NEXT: .save {r7, lr}
+; VFP2-NEXT: push {r7, lr}
+; VFP2-NEXT: vmov r0, s0
+; VFP2-NEXT: bl __aeabi_h2f
+; VFP2-NEXT: vmov s0, r0
+; VFP2-NEXT: vcvt.s32.f32 s0, s0
+; VFP2-NEXT: vmov r0, s0
+; VFP2-NEXT: usat r0, #0, r0
+; VFP2-NEXT: pop {r7, pc}
+;
+; FULL-LABEL: ustest_f16i32_nsat:
+; FULL: @ %bb.0:
+; FULL-NEXT: vcvt.s32.f16 s0, s0
+; FULL-NEXT: vmov r0, s0
+; FULL-NEXT: usat r0, #0, r0
+; FULL-NEXT: bx lr
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
index 96f009a..ba31b35 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
@@ -748,8 +748,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i32:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
@@ -821,7 +821,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NEON-NEXT: vpop {d12, d13}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r8, r9, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i32:
+; CHECK-FP16-LABEL: utest_f16i32:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
@@ -1366,8 +1366,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i16:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -1441,7 +1441,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i16:
+; CHECK-FP16-LABEL: utest_f16i16:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: vmovx.f16 s4, s0
; CHECK-FP16-NEXT: vcvt.u32.f16 s12, s0
@@ -2109,8 +2109,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i64:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, lr}
@@ -2148,7 +2148,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8}
; CHECK-NEON-NEXT: pop {r4, r5, r6, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i64:
+; CHECK-FP16-LABEL: utest_f16i64:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -2835,8 +2835,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i32_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -2881,7 +2881,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i32_mm:
+; CHECK-FP16-LABEL: utest_f16i32_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -3344,8 +3344,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i16_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, r7, r11, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, r7, r11, lr}
@@ -3419,7 +3419,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEON-NEXT: pop {r4, r5, r6, r7, r11, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i16_mm:
+; CHECK-FP16-LABEL: utest_f16i16_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: vmovx.f16 s4, s0
; CHECK-FP16-NEXT: vcvt.u32.f16 s12, s0
@@ -4044,8 +4044,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-NEON-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-NEON-LABEL: utest_f16i64_mm:
; CHECK-NEON: @ %bb.0: @ %entry
; CHECK-NEON-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEON-NEXT: push {r4, r5, r6, lr}
@@ -4083,7 +4083,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-NEON-NEXT: vpop {d8}
; CHECK-NEON-NEXT: pop {r4, r5, r6, pc}
;
-; CHECK-FP16-LABEL: utesth_f16i64_mm:
+; CHECK-FP16-LABEL: utest_f16i64_mm:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r4, r5, r6, lr}
; CHECK-FP16-NEXT: push {r4, r5, r6, lr}
@@ -4215,6 +4215,77 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non-saturating
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-NEON-LABEL: ustest_f16i32_nsat:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: .save {r4, lr}
+; CHECK-NEON-NEXT: push {r4, lr}
+; CHECK-NEON-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.f32 s16, s3
+; CHECK-NEON-NEXT: vmov.f32 s18, s2
+; CHECK-NEON-NEXT: vmov.f32 s20, s1
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: mov r4, r0
+; CHECK-NEON-NEXT: vmov r0, s16
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vmov s16, r0
+; CHECK-NEON-NEXT: vmov r0, s18
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vmov s0, r0
+; CHECK-NEON-NEXT: vmov r1, s20
+; CHECK-NEON-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NEON-NEXT: vmov s18, r4
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.32 d11[0], r0
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bl __aeabi_h2f
+; CHECK-NEON-NEXT: vcvt.s32.f32 s2, s18
+; CHECK-NEON-NEXT: vmov s0, r0
+; CHECK-NEON-NEXT: vcvt.s32.f32 s4, s16
+; CHECK-NEON-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NEON-NEXT: vmov.i32 q8, #0x0
+; CHECK-NEON-NEXT: vmov r0, s2
+; CHECK-NEON-NEXT: vmov.32 d10[0], r0
+; CHECK-NEON-NEXT: vmov r0, s4
+; CHECK-NEON-NEXT: vmov.32 d11[1], r0
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: vmov.32 d10[1], r0
+; CHECK-NEON-NEXT: vmin.s32 q9, q5, q8
+; CHECK-NEON-NEXT: vmax.s32 q0, q9, q8
+; CHECK-NEON-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEON-NEXT: pop {r4, pc}
+;
+; CHECK-FP16-LABEL: ustest_f16i32_nsat:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: vmovx.f16 s2, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s6, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s0, s1
+; CHECK-FP16-NEXT: vmovx.f16 s4, s1
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: vcvt.s32.f16 s4, s4
+; CHECK-FP16-NEXT: vcvt.s32.f16 s2, s2
+; CHECK-FP16-NEXT: vmov.i32 q9, #0x0
+; CHECK-FP16-NEXT: vmov.32 d17[0], r0
+; CHECK-FP16-NEXT: vmov r0, s6
+; CHECK-FP16-NEXT: vmov.32 d16[0], r0
+; CHECK-FP16-NEXT: vmov r0, s4
+; CHECK-FP16-NEXT: vmov.32 d17[1], r0
+; CHECK-FP16-NEXT: vmov r0, s2
+; CHECK-FP16-NEXT: vmov.32 d16[1], r0
+; CHECK-FP16-NEXT: vmin.s32 q8, q8, q9
+; CHECK-FP16-NEXT: vmax.s32 q0, q8, q9
+; CHECK-FP16-NEXT: bx lr
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll
new file mode 100644
index 0000000..8b121c5
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll
@@ -0,0 +1,93 @@
+;; RUN: llc --mtriple=hexagon -mattr=+hvxv79,+hvx-length128b %s -o - | FileCheck %s
+
+define dso_local void @store_isnan_f32(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0
+ %0 = load <32 x float>, ptr %arrayidx_a, align 4
+ %1 = load <32 x float>, ptr %arrayidx_b, align 4
+ %.vectorized = fcmp uno <32 x float> %0, %1
+ %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32>
+ %arrayidx1 = getelementptr inbounds nuw i32, ptr %isnan_cmp, i32 0
+ store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4
+ ret void
+}
+
+; CHECK: store_isnan_f32
+; CHECK: [[RONE32:r[0-9]+]] = #1
+; CHECK: [[VOP2_F32:v[0-9]+]] = vxor([[VOP2_F32]],[[VOP2_F32]])
+; CHECK: [[VOP1_F32:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES32:v[0-9]+]] = vsplat([[RONE32]])
+; CHECK: [[Q1_F32:q[0-9]+]] = vcmp.eq([[VOP1_F32]].w,[[VOP1_F32]].w)
+; CHECK: [[VOP3_F32:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_F32]] &= vcmp.eq([[VOP3_F32]].w,[[VOP3_F32]].w)
+; CHECK: [[VOUT_F32:v[0-9]+]] = vmux([[Q1_F32]],[[VOP2_F32]],[[VONES32]])
+; CHECK: vmemu(r2+#0) = [[VOUT_F32]]
+
+define dso_local void @store_isnan_f16(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0
+ %0 = load <64 x half>, ptr %arrayidx_a, align 2
+ %1 = load <64 x half>, ptr %arrayidx_b, align 2
+ %.vectorized = fcmp uno <64 x half> %0, %1
+ %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16>
+ %arrayidx1 = getelementptr inbounds nuw i16, ptr %isnan_cmp, i32 0
+ store <64 x i16> %conv.LS.instance, ptr %arrayidx1, align 2
+ ret void
+}
+; CHECK-LABEL: store_isnan_f16
+; CHECK: [[RONE16:r[0-9]+]] = #1
+; CHECK: [[VOP2_F16:v[0-9]+]] = vxor([[VOP2_F16]],[[VOP2_F16]])
+; CHECK: [[VOP1_F16:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES16:v[0-9]+]].h = vsplat([[RONE16]])
+; CHECK: [[Q1_F16:q[0-9]+]] = vcmp.eq([[VOP1_F16]].h,[[VOP1_F16]].h)
+; CHECK: [[VOP3_F16:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_F16]] &= vcmp.eq([[VOP3_F16]].h,[[VOP3_F16]].h)
+; CHECK: [[VOUT_F16:v[0-9]+]] = vmux([[Q1_F16]],[[VOP2_F16]],[[VONES16]])
+; CHECK: vmemu(r2+#0) = [[VOUT_F16]]
+
+define dso_local void @store_isordered_f32(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0
+ %0 = load <32 x float>, ptr %arrayidx_a, align 4
+ %1 = load <32 x float>, ptr %arrayidx_b, align 4
+ %.vectorized = fcmp ord <32 x float> %0, %1
+ %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32>
+ %arrayidx1 = getelementptr inbounds nuw i32, ptr %isordered_cmp, i32 0
+ store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4
+ ret void
+}
+; CHECK-LABEL: store_isordered_f32
+; CHECK: [[VOP2_ORD_F32:v[0-9]+]] = vxor([[VOP2_ORD_F32]],[[VOP2_ORD_F32]])
+; CHECK: [[VOP1_ORD_F32:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES_ORD_F32:v[0-9]+]] = vsplat([[RONE32]])
+; CHECK: [[Q1_ORD_F32:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F32]].w,[[VOP1_ORD_F32]].w)
+; CHECK: [[VOP3_ORD_F32:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_ORD_F32]] &= vcmp.eq([[VOP3_ORD_F32]].w,[[VOP3_ORD_F32]].w)
+; CHECK: [[VOUT_ORD_F32:v[0-9]+]] = vmux([[Q1_ORD_F32]],[[VONES_ORD_F32]],[[VOP2_ORD_F32]])
+; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F32]]
+
+
+define dso_local void @store_isordered_f16(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr {
+entry:
+ %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0
+ %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0
+ %0 = load <64 x half>, ptr %arrayidx_a, align 2
+ %1 = load <64 x half>, ptr %arrayidx_b, align 2
+ %.vectorized = fcmp ord <64 x half> %0, %1
+ %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16>
+ %arrayidx1 = getelementptr inbounds nuw i16, ptr %isordered_cmp, i32 0
+ store <64 x i16> %conv.LS.instance, ptr %arrayidx1, align 2
+ ret void
+}
+; CHECK-LABEL: store_isordered_f16
+; CHECK: [[VOP2_ORD_F16:v[0-9]+]] = vxor([[VOP2_ORD_F16]],[[VOP2_ORD_F16]])
+; CHECK: [[VOP1_ORD_F16:v[0-9]+]] = vmemu(r0+#0)
+; CHECK: [[VONES_ORD_F16:v[0-9]+]].h = vsplat([[RONE16]])
+; CHECK: [[Q1_ORD_F16:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F16]].h,[[VOP1_ORD_F16]].h)
+; CHECK: [[VOP3_ORD_F16:v[0-9]+]] = vmemu(r1+#0)
+; CHECK: [[Q1_ORD_F16]] &= vcmp.eq([[VOP3_ORD_F16]].h,[[VOP3_ORD_F16]].h)
+; CHECK: [[VOUT_ORD_F16:v[0-9]+]] = vmux([[Q1_ORD_F16]],[[VONES_ORD_F16]],[[VOP2_ORD_F16]])
+; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F16]]
diff --git a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
index 5fa5023..fe0f7dd 100644
--- a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
+++ b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=hexagon-unknown-elf < %s | FileCheck %s
; In ISelLowering, when folding nodes (or (shl xx, s), (zext y))
@@ -11,17 +12,18 @@ target triple = "hexagon"
; Function Attrs: nofree nosync nounwind memory(readwrite, inaccessiblemem: none)
define dso_local void @foo(i64* nocapture noundef %buf, i32 %a, i32 %b) local_unnamed_addr {
; CHECK-LABEL: foo:
-; CHECK: // %bb.0: // %entry
+; CHECK: .cfi_startproc
+; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: {
-; CHECK-NEXT: r[[REG0:[0-9]+]] = addasl(r2,r1,#1)
-; CHECK-NEXT: r[[REG2:[0-9]+]] = asl(r1,#1)
+; CHECK-NEXT: r2 = addasl(r2,r1,#1)
+; CHECK-NEXT: r3 = asl(r1,#1)
; CHECK-NEXT: }
; CHECK-NEXT: {
-; CHECK-NEXT: r[[REG1:[0-9]+]] = addasl(r[[REG0]],r1,#1)
+; CHECK-NEXT: r2 = addasl(r2,r1,#1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: jumpr r31
-; CHECK-NEXT: memd(r0+#8) = r[[REG2]]:[[REG1]]
+; CHECK-NEXT: memd(r0+#8) = r3:2
; CHECK-NEXT: }
entry:
%arrayidx = getelementptr inbounds i64, i64* %buf, i32 1
diff --git a/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll
new file mode 100644
index 0000000..93ca3a2
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll
@@ -0,0 +1,42 @@
+; Tests lowering of v32i1 to v32f32
+
+; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \
+; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s
+
+define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 {
+; CHECK: name: uitofp_i1
+; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}}
+; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1
+; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]]
+; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]]
+; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]]
+; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]]
+; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]]
+; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]]
+; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]]
+; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]]
+ %q1 = icmp eq <32 x i16> %in0, %in1
+ %fp0 = uitofp <32 x i1> %q1 to <32 x float>
+ %out = fadd <32 x float> %fp0, %fp0
+ ret <32 x float> %out
+}
+
+define <32 x float> @sitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 {
+; CHECK: name: sitofp_i1
+; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}}
+; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1
+; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]]
+; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]]
+; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]]
+; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]]
+; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]]
+; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]]
+; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]]
+; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]]
+ %q1 = icmp eq <32 x i16> %in0, %in1
+ %fp0 = sitofp <32 x i1> %q1 to <32 x float>
+ %out = fadd <32 x float> %fp0, %fp0
+ ret <32 x float> %out
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" }
diff --git a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll
deleted file mode 100644
index dfb2bc83..0000000
--- a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; Tests lowering of v32i1 to v32f32
-
-; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \
-; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s
-
-; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}}
-; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1
-; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]]
-; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]]
-; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]]
-; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]]
-; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]]
-; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]]
-; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]]
-; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]]
-
-define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0
-{
- %q1 = icmp eq <32 x i16> %in0, %in1
- %fp0 = uitofp <32 x i1> %q1 to <32 x float>
- %out = fadd <32 x float> %fp0, %fp0
- ret <32 x float> %out
-}
-
-attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
index 1469d49..4b0acda 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
@@ -1420,3 +1420,61 @@ define double @tanh_f64(double %a) nounwind {
%1 = call double @llvm.tanh.f64(double %a)
ret double %1
}
+
+define { double, double } @test_modf_f64(double %a) nounwind {
+; RV32IFD-LABEL: test_modf_f64:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: mv a0, sp
+; RV32IFD-NEXT: call modf
+; RV32IFD-NEXT: fld fa1, 0(sp)
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: test_modf_f64:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT: mv a0, sp
+; RV64IFD-NEXT: call modf
+; RV64IFD-NEXT: fld fa1, 0(sp)
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
+; RV64IFD-NEXT: ret
+;
+; RV32I-LABEL: test_modf_f64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: mv a2, sp
+; RV32I-NEXT: call modf
+; RV32I-NEXT: lw a2, 0(sp)
+; RV32I-NEXT: lw a3, 4(sp)
+; RV32I-NEXT: sw a0, 0(s0)
+; RV32I-NEXT: sw a1, 4(s0)
+; RV32I-NEXT: sw a2, 8(s0)
+; RV32I-NEXT: sw a3, 12(s0)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_modf_f64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: mv a1, sp
+; RV64I-NEXT: call modf
+; RV64I-NEXT: ld a1, 0(sp)
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
index 23f660b..01d9ceb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
@@ -2118,3 +2118,62 @@ define float @tanh_f32(float %a) nounwind {
%1 = call float @llvm.tanh.f32(float %a)
ret float %1
}
+
+define { float, float } @test_modf_f32(float %a) nounwind {
+; RV32IF-LABEL: test_modf_f32:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT: addi a0, sp, 8
+; RV32IF-NEXT: call modff
+; RV32IF-NEXT: flw fa1, 8(sp)
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
+; RV32IF-NEXT: ret
+;
+; RV64IF-LABEL: test_modf_f32:
+; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT: addi a0, sp, 4
+; RV64IF-NEXT: call modff
+; RV64IF-NEXT: flw fa1, 4(sp)
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
+; RV64IF-NEXT: ret
+;
+; RV64IFD-LABEL: test_modf_f32:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT: addi a0, sp, 4
+; RV64IFD-NEXT: call modff
+; RV64IFD-NEXT: flw fa1, 4(sp)
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
+; RV64IFD-NEXT: ret
+;
+; RV32I-LABEL: test_modf_f32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a1, sp, 8
+; RV32I-NEXT: call modff
+; RV32I-NEXT: lw a1, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_modf_f32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi a1, sp, 4
+; RV64I-NEXT: call modff
+; RV64I-NEXT: lw a1, 4(sp)
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+ %result = call { float, float } @llvm.modf.f32(float %a)
+ ret { float, float } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index ae9b6cc..e7a3f79 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -911,3 +911,28 @@ define fp128 @tanh(fp128 %a) nounwind {
%1 = call fp128 @llvm.tanh.f128(fp128 %a)
ret fp128 %1
}
+
+define { fp128, fp128 } @modf(fp128 %a) nounwind {
+; CHECK-LABEL: modf:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -32
+; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT: mv s0, a0
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a2, sp
+; CHECK-NEXT: call modfl
+; CHECK-NEXT: ld a2, 0(sp)
+; CHECK-NEXT: ld a3, 8(sp)
+; CHECK-NEXT: sd a0, 0(s0)
+; CHECK-NEXT: sd a1, 8(s0)
+; CHECK-NEXT: sd a2, 16(s0)
+; CHECK-NEXT: sd a3, 24(s0)
+; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT: addi sp, sp, 32
+; CHECK-NEXT: ret
+ %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a)
+ ret { fp128, fp128 } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index f1d17f9f..1361d92 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -506,8 +506,9 @@
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index caeb6e6..aaa08b5 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -2109,3 +2109,85 @@ define double @tanh_f64(double %a) nounwind {
%1 = call double @llvm.tanh.f64(double %a)
ret double %1
}
+
+define { double, double } @test_modf_f64(double %a) nounwind {
+; RV32IFD-LABEL: test_modf_f64:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: mv a0, sp
+; RV32IFD-NEXT: call modf
+; RV32IFD-NEXT: fld fa1, 0(sp)
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: test_modf_f64:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT: mv a0, sp
+; RV64IFD-NEXT: call modf
+; RV64IFD-NEXT: fld fa1, 0(sp)
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
+; RV64IFD-NEXT: ret
+;
+; RV32IZFINXZDINX-LABEL: test_modf_f64:
+; RV32IZFINXZDINX: # %bb.0:
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: mv a2, sp
+; RV32IZFINXZDINX-NEXT: call modf
+; RV32IZFINXZDINX-NEXT: lw a2, 0(sp)
+; RV32IZFINXZDINX-NEXT: lw a3, 4(sp)
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: ret
+;
+; RV64IZFINXZDINX-LABEL: test_modf_f64:
+; RV64IZFINXZDINX: # %bb.0:
+; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT: mv a1, sp
+; RV64IZFINXZDINX-NEXT: call modf
+; RV64IZFINXZDINX-NEXT: ld a1, 0(sp)
+; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT: ret
+;
+; RV32I-LABEL: test_modf_f64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv a3, a2
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: mv a2, sp
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: call modf
+; RV32I-NEXT: lw a2, 0(sp)
+; RV32I-NEXT: lw a3, 4(sp)
+; RV32I-NEXT: sw a0, 0(s0)
+; RV32I-NEXT: sw a1, 4(s0)
+; RV32I-NEXT: sw a2, 8(s0)
+; RV32I-NEXT: sw a3, 12(s0)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_modf_f64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: mv a1, sp
+; RV64I-NEXT: call modf
+; RV64I-NEXT: ld a1, 0(sp)
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index b1230ae..5f673ac 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -3050,3 +3050,84 @@ define float @tanh_f32(float %a) nounwind {
%1 = call float @llvm.tanh.f32(float %a)
ret float %1
}
+
+define { float, float } @test_modf_f32(float %a) nounwind {
+; RV32IF-LABEL: test_modf_f32:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT: addi a0, sp, 8
+; RV32IF-NEXT: call modff
+; RV32IF-NEXT: flw fa1, 8(sp)
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
+; RV32IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_modf_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: addi a1, sp, 8
+; RV32IZFINX-NEXT: call modff
+; RV32IZFINX-NEXT: lw a1, 8(sp)
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IF-LABEL: test_modf_f32:
+; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT: addi a0, sp, 4
+; RV64IF-NEXT: call modff
+; RV64IF-NEXT: flw fa1, 4(sp)
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
+; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_modf_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: addi a1, sp, 4
+; RV64IZFINX-NEXT: call modff
+; RV64IZFINX-NEXT: lw a1, 4(sp)
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
+; RV64IFD-LABEL: test_modf_f32:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT: addi a0, sp, 4
+; RV64IFD-NEXT: call modff
+; RV64IFD-NEXT: flw fa1, 4(sp)
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
+; RV64IFD-NEXT: ret
+;
+; RV32I-LABEL: test_modf_f32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a1, sp, 8
+; RV32I-NEXT: call modff
+; RV32I-NEXT: lw a1, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_modf_f32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi a1, sp, 4
+; RV64I-NEXT: call modff
+; RV64I-NEXT: lw a1, 4(sp)
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+ %result = call { float, float } @llvm.modf.f32(float %a)
+ ret { float, float } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll
index a8e26f7..704cb425 100644
--- a/llvm/test/CodeGen/RISCV/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/fp128.ll
@@ -189,3 +189,46 @@ define fp128 @fminimum(fp128 %x, fp128 %y) {
%a = call fp128 @llvm.minimum.fp128(fp128 %x, fp128 %y)
ret fp128 %a
}
+
+define { fp128, fp128 } @modf(fp128 %a) nounwind {
+; RV32I-LABEL: modf:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: lw a4, 4(a1)
+; RV32I-NEXT: lw a5, 8(a1)
+; RV32I-NEXT: lw a6, 12(a1)
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: addi a0, sp, 24
+; RV32I-NEXT: addi a1, sp, 8
+; RV32I-NEXT: addi a2, sp, 40
+; RV32I-NEXT: sw a3, 8(sp)
+; RV32I-NEXT: sw a4, 12(sp)
+; RV32I-NEXT: sw a5, 16(sp)
+; RV32I-NEXT: sw a6, 20(sp)
+; RV32I-NEXT: call modfl
+; RV32I-NEXT: lw a0, 24(sp)
+; RV32I-NEXT: lw a1, 28(sp)
+; RV32I-NEXT: lw a2, 32(sp)
+; RV32I-NEXT: lw a3, 36(sp)
+; RV32I-NEXT: lw a4, 40(sp)
+; RV32I-NEXT: lw a5, 44(sp)
+; RV32I-NEXT: lw a6, 48(sp)
+; RV32I-NEXT: lw a7, 52(sp)
+; RV32I-NEXT: sw a4, 16(s0)
+; RV32I-NEXT: sw a5, 20(s0)
+; RV32I-NEXT: sw a6, 24(s0)
+; RV32I-NEXT: sw a7, 28(s0)
+; RV32I-NEXT: sw a0, 0(s0)
+; RV32I-NEXT: sw a1, 4(s0)
+; RV32I-NEXT: sw a2, 8(s0)
+; RV32I-NEXT: sw a3, 12(s0)
+; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a)
+ ret { fp128, fp128 } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 18d071c..a0d1ecc 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -436,8 +436,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; RV32-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) {
+; RV32-LABEL: utest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
@@ -456,7 +456,7 @@ define i32 @utesth_f16i32(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: utesth_f16i32:
+; RV64-LABEL: utest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
@@ -974,8 +974,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; RV32-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) {
+; RV32-LABEL: utest_f16i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
@@ -995,7 +995,7 @@ define i16 @utesth_f16i16(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: utesth_f16i16:
+; RV64-LABEL: utest_f16i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
@@ -3829,6 +3829,52 @@ entry:
ret i64 %conv6
}
+; i32 non-saturating
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; RV32-LABEL: ustest_f16i32_nsat:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: srai a1, a0, 31
+; RV32-NEXT: and a0, a1, a0
+; RV32-NEXT: sgtz a1, a0
+; RV32-NEXT: neg a1, a1
+; RV32-NEXT: and a0, a1, a0
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ustest_f16i32_nsat:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: fcvt.l.s a0, fa0, rtz
+; RV64-NEXT: srai a1, a0, 63
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: sgtz a1, a0
+; RV64-NEXT: neg a1, a1
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index e16d788..847054d 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -4417,3 +4417,161 @@ define half @tanh_f16(half %a) nounwind {
%1 = call half @llvm.tanh.f16(half %a)
ret half %1
}
+
+define { half, half } @test_modf_f16(half %a) nounwind {
+; RV32IZFH-LABEL: test_modf_f16:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT: addi a0, sp, 8
+; RV32IZFH-NEXT: call modff
+; RV32IZFH-NEXT: flw fa5, 8(sp)
+; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT: fcvt.h.s fa1, fa5
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: test_modf_f16:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
+; RV64IZFH-NEXT: addi a0, sp, 4
+; RV64IZFH-NEXT: call modff
+; RV64IZFH-NEXT: flw fa5, 4(sp)
+; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
+; RV64IZFH-NEXT: fcvt.h.s fa1, fa5
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
+; RV64IZFH-NEXT: ret
+;
+; RV32IZHINX-LABEL: test_modf_f16:
+; RV32IZHINX: # %bb.0:
+; RV32IZHINX-NEXT: addi sp, sp, -16
+; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZHINX-NEXT: fcvt.s.h a0, a0
+; RV32IZHINX-NEXT: addi a1, sp, 8
+; RV32IZHINX-NEXT: call modff
+; RV32IZHINX-NEXT: lw a1, 8(sp)
+; RV32IZHINX-NEXT: fcvt.h.s a0, a0
+; RV32IZHINX-NEXT: fcvt.h.s a1, a1
+; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT: addi sp, sp, 16
+; RV32IZHINX-NEXT: ret
+;
+; RV64IZHINX-LABEL: test_modf_f16:
+; RV64IZHINX: # %bb.0:
+; RV64IZHINX-NEXT: addi sp, sp, -16
+; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT: fcvt.s.h a0, a0
+; RV64IZHINX-NEXT: addi a1, sp, 4
+; RV64IZHINX-NEXT: call modff
+; RV64IZHINX-NEXT: lw a1, 4(sp)
+; RV64IZHINX-NEXT: fcvt.h.s a0, a0
+; RV64IZHINX-NEXT: fcvt.h.s a1, a1
+; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT: addi sp, sp, 16
+; RV64IZHINX-NEXT: ret
+;
+; RV32I-LABEL: test_modf_f16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: addi a1, sp, 4
+; RV32I-NEXT: call modff
+; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: mv s0, a0
+; RV32I-NEXT: lw a0, 4(sp)
+; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_modf_f16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: addi a1, sp, 12
+; RV64I-NEXT: call modff
+; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: mv s0, a0
+; RV64I-NEXT: lw a0, 12(sp)
+; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: mv a1, a0
+; RV64I-NEXT: mv a0, s0
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV32IZFHMIN-LABEL: test_modf_f16:
+; RV32IZFHMIN: # %bb.0:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
+; RV32IZFHMIN-NEXT: addi a0, sp, 8
+; RV32IZFHMIN-NEXT: call modff
+; RV32IZFHMIN-NEXT: flw fa5, 8(sp)
+; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
+; RV32IZFHMIN-NEXT: fcvt.h.s fa1, fa5
+; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: addi sp, sp, 16
+; RV32IZFHMIN-NEXT: ret
+;
+; RV64IZFHMIN-LABEL: test_modf_f16:
+; RV64IZFHMIN: # %bb.0:
+; RV64IZFHMIN-NEXT: addi sp, sp, -16
+; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
+; RV64IZFHMIN-NEXT: addi a0, sp, 4
+; RV64IZFHMIN-NEXT: call modff
+; RV64IZFHMIN-NEXT: flw fa5, 4(sp)
+; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
+; RV64IZFHMIN-NEXT: fcvt.h.s fa1, fa5
+; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFHMIN-NEXT: addi sp, sp, 16
+; RV64IZFHMIN-NEXT: ret
+;
+; RV32IZHINXMIN-LABEL: test_modf_f16:
+; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: addi sp, sp, -16
+; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
+; RV32IZHINXMIN-NEXT: addi a1, sp, 8
+; RV32IZHINXMIN-NEXT: call modff
+; RV32IZHINXMIN-NEXT: lw a1, 8(sp)
+; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
+; RV32IZHINXMIN-NEXT: fcvt.h.s a1, a1
+; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT: addi sp, sp, 16
+; RV32IZHINXMIN-NEXT: ret
+;
+; RV64IZHINXMIN-LABEL: test_modf_f16:
+; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: addi sp, sp, -16
+; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
+; RV64IZHINXMIN-NEXT: addi a1, sp, 4
+; RV64IZHINXMIN-NEXT: call modff
+; RV64IZHINXMIN-NEXT: lw a1, 4(sp)
+; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
+; RV64IZHINXMIN-NEXT: fcvt.h.s a1, a1
+; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINXMIN-NEXT: addi sp, sp, 16
+; RV64IZHINXMIN-NEXT: ret
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ ret { half, half } %result
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index aba9d37..f5977625 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -519,8 +519,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i32:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -64
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
@@ -610,7 +610,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NOV-NEXT: bgeu a3, a1, .LBB7_4
; CHECK-NOV-NEXT: j .LBB7_5
;
-; CHECK-V-LABEL: utesth_f16i32:
+; CHECK-V-LABEL: utest_f16i32:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -48
; CHECK-V-NEXT: .cfi_def_cfa_offset 48
@@ -1594,8 +1594,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i16:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -128
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 128
@@ -1765,7 +1765,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-NOV-NEXT: bgeu a7, a3, .LBB16_8
; CHECK-NOV-NEXT: j .LBB16_9
;
-; CHECK-V-LABEL: utesth_f16i16:
+; CHECK-V-LABEL: utest_f16i16:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -80
; CHECK-V-NEXT: .cfi_def_cfa_offset 80
@@ -3332,8 +3332,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i64:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -32
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 32
@@ -3373,7 +3373,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT: ret
;
-; CHECK-V-LABEL: utesth_f16i64:
+; CHECK-V-LABEL: utest_f16i64:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -32
; CHECK-V-NEXT: .cfi_def_cfa_offset 32
@@ -4074,8 +4074,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i32_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -64
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
@@ -4165,7 +4165,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-NOV-NEXT: bgeu a3, a1, .LBB34_4
; CHECK-NOV-NEXT: j .LBB34_5
;
-; CHECK-V-LABEL: utesth_f16i32_mm:
+; CHECK-V-LABEL: utest_f16i32_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -48
; CHECK-V-NEXT: .cfi_def_cfa_offset 48
@@ -5134,8 +5134,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i16_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -128
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 128
@@ -5305,7 +5305,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-NOV-NEXT: bgeu a7, a3, .LBB43_8
; CHECK-NOV-NEXT: j .LBB43_9
;
-; CHECK-V-LABEL: utesth_f16i16_mm:
+; CHECK-V-LABEL: utest_f16i16_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -80
; CHECK-V-NEXT: .cfi_def_cfa_offset 80
@@ -6837,8 +6837,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-NOV-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-NOV-LABEL: utest_f16i64_mm:
; CHECK-NOV: # %bb.0: # %entry
; CHECK-NOV-NEXT: addi sp, sp, -32
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 32
@@ -6877,7 +6877,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT: ret
;
-; CHECK-V-LABEL: utesth_f16i64_mm:
+; CHECK-V-LABEL: utest_f16i64_mm:
; CHECK-V: # %bb.0: # %entry
; CHECK-V-NEXT: addi sp, sp, -32
; CHECK-V-NEXT: .cfi_def_cfa_offset 32
@@ -7048,6 +7048,172 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non-saturating
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-NOV-LABEL: ustest_f16i32_nsat:
+; CHECK-NOV: # %bb.0: # %entry
+; CHECK-NOV-NEXT: addi sp, sp, -64
+; CHECK-NOV-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NOV-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
+; CHECK-NOV-NEXT: .cfi_offset ra, -8
+; CHECK-NOV-NEXT: .cfi_offset s0, -16
+; CHECK-NOV-NEXT: .cfi_offset s1, -24
+; CHECK-NOV-NEXT: .cfi_offset s2, -32
+; CHECK-NOV-NEXT: .cfi_offset s3, -40
+; CHECK-NOV-NEXT: .cfi_offset fs0, -48
+; CHECK-NOV-NEXT: .cfi_offset fs1, -56
+; CHECK-NOV-NEXT: lhu s1, 0(a1)
+; CHECK-NOV-NEXT: lhu s2, 8(a1)
+; CHECK-NOV-NEXT: lhu a2, 16(a1)
+; CHECK-NOV-NEXT: lhu s3, 24(a1)
+; CHECK-NOV-NEXT: mv s0, a0
+; CHECK-NOV-NEXT: fmv.w.x fa0, a2
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fmv.s fs0, fa0
+; CHECK-NOV-NEXT: fmv.w.x fa0, s2
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fmv.s fs1, fa0
+; CHECK-NOV-NEXT: fmv.w.x fa0, s1
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fcvt.l.s s1, fa0, rtz
+; CHECK-NOV-NEXT: fcvt.l.s s2, fs1, rtz
+; CHECK-NOV-NEXT: fmv.w.x fa0, s3
+; CHECK-NOV-NEXT: fcvt.l.s s3, fs0, rtz
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-NOV-NEXT: srai a1, s3, 63
+; CHECK-NOV-NEXT: and a1, a1, s3
+; CHECK-NOV-NEXT: srai a2, s2, 63
+; CHECK-NOV-NEXT: and a2, a2, s2
+; CHECK-NOV-NEXT: srai a3, s1, 63
+; CHECK-NOV-NEXT: and a3, a3, s1
+; CHECK-NOV-NEXT: srai a4, a0, 63
+; CHECK-NOV-NEXT: and a0, a4, a0
+; CHECK-NOV-NEXT: sgtz a4, a3
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a3, a4, a3
+; CHECK-NOV-NEXT: sgtz a4, a2
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a2, a4, a2
+; CHECK-NOV-NEXT: sgtz a4, a1
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a1, a4, a1
+; CHECK-NOV-NEXT: sgtz a4, a0
+; CHECK-NOV-NEXT: neg a4, a4
+; CHECK-NOV-NEXT: and a0, a4, a0
+; CHECK-NOV-NEXT: sw a3, 0(s0)
+; CHECK-NOV-NEXT: sw a2, 4(s0)
+; CHECK-NOV-NEXT: sw a1, 8(s0)
+; CHECK-NOV-NEXT: sw a0, 12(s0)
+; CHECK-NOV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
+; CHECK-NOV-NEXT: .cfi_restore ra
+; CHECK-NOV-NEXT: .cfi_restore s0
+; CHECK-NOV-NEXT: .cfi_restore s1
+; CHECK-NOV-NEXT: .cfi_restore s2
+; CHECK-NOV-NEXT: .cfi_restore s3
+; CHECK-NOV-NEXT: .cfi_restore fs0
+; CHECK-NOV-NEXT: .cfi_restore fs1
+; CHECK-NOV-NEXT: addi sp, sp, 64
+; CHECK-NOV-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NOV-NEXT: ret
+;
+; CHECK-V-LABEL: ustest_f16i32_nsat:
+; CHECK-V: # %bb.0: # %entry
+; CHECK-V-NEXT: addi sp, sp, -48
+; CHECK-V-NEXT: .cfi_def_cfa_offset 48
+; CHECK-V-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; CHECK-V-NEXT: .cfi_offset ra, -8
+; CHECK-V-NEXT: .cfi_offset s0, -16
+; CHECK-V-NEXT: .cfi_offset s1, -24
+; CHECK-V-NEXT: .cfi_offset s2, -32
+; CHECK-V-NEXT: csrr a1, vlenb
+; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: sub sp, sp, a1
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
+; CHECK-V-NEXT: fmv.w.x fa0, a0
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: fmv.w.x fa0, s1
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: vmin.vx v8, v8, zero
+; CHECK-V-NEXT: vmax.vx v8, v8, zero
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
+; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-V-NEXT: .cfi_restore ra
+; CHECK-V-NEXT: .cfi_restore s0
+; CHECK-V-NEXT: .cfi_restore s1
+; CHECK-V-NEXT: .cfi_restore s2
+; CHECK-V-NEXT: addi sp, sp, 48
+; CHECK-V-NEXT: .cfi_def_cfa_offset 0
+; CHECK-V-NEXT: ret
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
index 9ccd4f1..767ef7e 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
@@ -184,4 +184,29 @@ entry:
ret i32 %2
}
+define i32 @test_generic_inst(i32 %arg) #0 {
+;CHECK-LABEL: test_generic_inst:
+;CHECK: ! fake_use: {{.*}}
+;CHECK: bne {{.*}}
+;CHECK-NEXT: nop
+ %bar1 = call i32 @bar(i32 %arg)
+ %even = and i32 %bar1, 1
+ %cmp = icmp eq i32 %even, 0
+ ; This shouldn't get reordered into a delay slot
+ call void (...) @llvm.fake.use(i32 %arg)
+ br i1 %cmp, label %true, label %false
+true:
+ %bar2 = call i32 @bar(i32 %bar1)
+ br label %cont
+
+false:
+ %inc = add nsw i32 %bar1, 1
+ br label %cont
+
+cont:
+ %ret = phi i32 [ %bar2, %true ], [ %inc, %false ]
+ ret i32 %ret
+}
+
+declare void @llvm.fake.use(...)
attributes #0 = { nounwind "disable-tail-calls"="true" }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll b/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll
new file mode 100644
index 0000000..8491328
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/ptrcast-bitcast.ll
@@ -0,0 +1,28 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s --match-full-lines
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#v2_uint:]] = OpTypeVector %[[#uint]] 2
+; CHECK-DAG: %[[#double:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#v2_double:]] = OpTypeVector %[[#double]] 2
+; CHECK-DAG: %[[#v4_uint:]] = OpTypeVector %[[#uint]] 4
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"Out\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v2i32_12_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str)
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <2 x double>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v2f64_12_1t(i32 0, i32 2, i32 1, i32 0, ptr nonnull @.str.2)
+ %2 = tail call noundef align 8 dereferenceable(8) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2i32_12_0t(target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) %0, i32 0)
+ %3 = load <2 x i32>, ptr addrspace(11) %2, align 8
+ %4 = tail call noundef align 8 dereferenceable(8) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2i32_12_0t(target("spirv.VulkanBuffer", [0 x <2 x i32>], 12, 0) %0, i32 1)
+ %5 = load <2 x i32>, ptr addrspace(11) %4, align 8
+; CHECK: %[[#tmp:]] = OpVectorShuffle %[[#v4_uint]] {{%[0-9]+}} {{%[0-9]+}} 0 2 1 3
+ %6 = shufflevector <2 x i32> %3, <2 x i32> %5, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK: %[[#access:]] = OpAccessChain {{.*}}
+ %7 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v2f64_12_1t(target("spirv.VulkanBuffer", [0 x <2 x double>], 12, 1) %1, i32 0)
+; CHECK: %[[#bitcast:]] = OpBitcast %[[#v2_double]] %[[#tmp]]
+; CHECK: OpStore %[[#access]] %[[#bitcast]] Aligned 16
+ store <4 x i32> %6, ptr addrspace(11) %7, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll b/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
index 137994ce..59f3edc 100644
--- a/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
+++ b/llvm/test/CodeGen/WebAssembly/fpclamptosat.ll
@@ -136,9 +136,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) {
-; CHECK-LABEL: utesth_f16i32:
-; CHECK: .functype utesth_f16i32 (f32) -> (i32)
+define i32 @utest_f16i32(half %x) {
+; CHECK-LABEL: utest_f16i32:
+; CHECK: .functype utest_f16i32 (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -153,9 +153,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_cse(half %x) {
-; CHECK-LABEL: utesth_f16i32_cse:
-; CHECK: .functype utesth_f16i32_cse (f32) -> (i32)
+define i32 @utest_f16i32_cse(half %x) {
+; CHECK-LABEL: utest_f16i32_cse:
+; CHECK: .functype utest_f16i32_cse (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -403,9 +403,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) {
-; CHECK-LABEL: utesth_f16i16:
-; CHECK: .functype utesth_f16i16 (f32) -> (i32)
+define i16 @utest_f16i16(half %x) {
+; CHECK-LABEL: utest_f16i16:
+; CHECK: .functype utest_f16i16 (f32) -> (i32)
; CHECK-NEXT: .local i32
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
@@ -427,9 +427,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_cse(half %x) {
-; CHECK-LABEL: utesth_f16i16_cse:
-; CHECK: .functype utesth_f16i16_cse (f32) -> (i32)
+define i16 @utest_f16i16_cse(half %x) {
+; CHECK-LABEL: utest_f16i16_cse:
+; CHECK: .functype utest_f16i16_cse (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -880,9 +880,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) {
-; CHECK-LABEL: utesth_f16i64:
-; CHECK: .functype utesth_f16i64 (f32) -> (i64)
+define i64 @utest_f16i64(half %x) {
+; CHECK-LABEL: utest_f16i64:
+; CHECK: .functype utest_f16i64 (f32) -> (i64)
; CHECK-NEXT: .local i32, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -919,9 +919,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_cse(half %x) {
-; CHECK-LABEL: utesth_f16i64_cse:
-; CHECK: .functype utesth_f16i64_cse (f32) -> (i64)
+define i64 @utest_f16i64_cse(half %x) {
+; CHECK-LABEL: utest_f16i64_cse:
+; CHECK: .functype utest_f16i64_cse (f32) -> (i64)
; CHECK-NEXT: .local i32, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1118,9 +1118,9 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) {
-; CHECK-LABEL: utesth_f16i32_mm:
-; CHECK: .functype utesth_f16i32_mm (f32) -> (i32)
+define i32 @utest_f16i32_mm(half %x) {
+; CHECK-LABEL: utest_f16i32_mm:
+; CHECK: .functype utest_f16i32_mm (f32) -> (i32)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: call __truncsfhf2
@@ -1353,9 +1353,9 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) {
-; CHECK-LABEL: utesth_f16i16_mm:
-; CHECK: .functype utesth_f16i16_mm (f32) -> (i32)
+define i16 @utest_f16i16_mm(half %x) {
+; CHECK-LABEL: utest_f16i16_mm:
+; CHECK: .functype utest_f16i16_mm (f32) -> (i32)
; CHECK-NEXT: .local i32
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
@@ -1637,9 +1637,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
-; CHECK: .functype utesth_f16i64_mm (f32) -> (i64)
+define i64 @utest_f16i64_mm(half %x) {
+; CHECK-LABEL: utest_f16i64_mm:
+; CHECK: .functype utest_f16i64_mm (f32) -> (i64)
; CHECK-NEXT: .local i32, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1724,9 +1724,9 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm_cse(half %x) {
-; CHECK-LABEL: utesth_f16i64_mm_cse:
-; CHECK: .functype utesth_f16i64_mm_cse (f32) -> (i64)
+define i64 @utest_f16i64_mm_cse(half %x) {
+; CHECK-LABEL: utest_f16i64_mm_cse:
+; CHECK: .functype utest_f16i64_mm_cse (f32) -> (i64)
; CHECK-NEXT: .local i32, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1754,6 +1754,35 @@ entry:
ret i64 %conv6
}
+; i32 non-saturating
+
+define i32 @ustest_f16i32_nsat(half %x) {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: .functype ustest_f16i32_nsat (f32) -> (i32)
+; CHECK-NEXT: .local i32
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: local.tee 1
+; CHECK-NEXT: i32.const 31
+; CHECK-NEXT: i32.shr_s
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.and
+; CHECK-NEXT: local.tee 1
+; CHECK-NEXT: i32.const 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.const 0
+; CHECK-NEXT: i32.gt_s
+; CHECK-NEXT: i32.select
+; CHECK-NEXT: # fallthrough-return
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll b/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
index 7190e16..52f57dc 100644
--- a/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/WebAssembly/fpclamptosat_vec.ll
@@ -209,9 +209,9 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) {
-; CHECK-LABEL: utesth_f16i32:
-; CHECK: .functype utesth_f16i32 (f32, f32, f32, f32) -> (v128)
+define <4 x i32> @utest_f16i32(<4 x half> %x) {
+; CHECK-LABEL: utest_f16i32:
+; CHECK: .functype utest_f16i32 (f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 1
; CHECK-NEXT: call __truncsfhf2
@@ -513,9 +513,9 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) {
-; CHECK-LABEL: utesth_f16i16:
-; CHECK: .functype utesth_f16i16 (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
+define <8 x i16> @utest_f16i16(<8 x half> %x) {
+; CHECK-LABEL: utest_f16i16:
+; CHECK: .functype utest_f16i16 (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: .local v128
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 5
@@ -1295,9 +1295,9 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) {
-; CHECK-LABEL: utesth_f16i64:
-; CHECK: .functype utesth_f16i64 (f32, f32) -> (v128)
+define <2 x i64> @utest_f16i64(<2 x half> %x) {
+; CHECK-LABEL: utest_f16i64:
+; CHECK: .functype utest_f16i64 (f32, f32) -> (v128)
; CHECK-NEXT: .local i32, i64, i64, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -1649,9 +1649,9 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
-; CHECK-LABEL: utesth_f16i32_mm:
-; CHECK: .functype utesth_f16i32_mm (f32, f32, f32, f32) -> (v128)
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) {
+; CHECK-LABEL: utest_f16i32_mm:
+; CHECK: .functype utest_f16i32_mm (f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 1
; CHECK-NEXT: call __truncsfhf2
@@ -1938,9 +1938,9 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
-; CHECK-LABEL: utesth_f16i16_mm:
-; CHECK: .functype utesth_f16i16_mm (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) {
+; CHECK-LABEL: utest_f16i16_mm:
+; CHECK: .functype utest_f16i16_mm (f32, f32, f32, f32, f32, f32, f32, f32) -> (v128)
; CHECK-NEXT: .local v128
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 5
@@ -2673,9 +2673,9 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
-; CHECK-LABEL: utesth_f16i64_mm:
-; CHECK: .functype utesth_f16i64_mm (f32, f32) -> (v128)
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
+; CHECK-LABEL: utest_f16i64_mm:
+; CHECK: .functype utest_f16i64_mm (f32, f32) -> (v128)
; CHECK-NEXT: .local i32, i64, i64, i64, i64
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get __stack_pointer
@@ -2810,6 +2810,48 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non-saturating
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: .functype ustest_f16i32_nsat (f32, f32, f32, f32) -> (v128)
+; CHECK-NEXT: .local v128
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: local.set 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.splat
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 1
+; CHECK-NEXT: local.get 2
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 2
+; CHECK-NEXT: local.get 3
+; CHECK-NEXT: call __truncsfhf2
+; CHECK-NEXT: call __extendhfsf2
+; CHECK-NEXT: i32.trunc_sat_f32_s
+; CHECK-NEXT: i32x4.replace_lane 3
+; CHECK-NEXT: v128.const 0, 0, 0, 0
+; CHECK-NEXT: local.tee 4
+; CHECK-NEXT: i32x4.min_s
+; CHECK-NEXT: local.get 4
+; CHECK-NEXT: i32x4.max_s
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index dec829f..44cf4e8 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 16(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
@@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 32(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -4155,7 +4155,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 48(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: paddb (%rdx), %xmm2
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 3d4cddb..89b5c33 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: paddb (%rsi), %xmm1
; SSE2-NEXT: movdqa %xmm1, (%rdx)
@@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
@@ -3335,7 +3335,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: paddb (%rsi), %xmm2
diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
index 983c69d..95c2eda 100644
--- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -441,10 +441,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4
; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4
-; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; CHECK-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4
-; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; CHECK-NEXT: vpsllw $2, %ymm2, %ymm2
; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -452,10 +452,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3
; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3
-; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; CHECK-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3
-; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vpsllw $2, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0
; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/fpclamptosat.ll b/llvm/test/CodeGen/X86/fpclamptosat.ll
index 3f5ec7b..67483be 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat.ll
@@ -161,8 +161,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i32:
+define i32 @utest_f16i32(half %x) nounwind {
+; CHECK-LABEL: utest_f16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -360,8 +360,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i16:
+define i16 @utest_f16i16(half %x) nounwind {
+; CHECK-LABEL: utest_f16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -566,8 +566,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i64:
+define i64 @utest_f16i64(half %x) nounwind {
+; CHECK-LABEL: utest_f16i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __fixunshfti@PLT
@@ -762,8 +762,8 @@ entry:
ret i32 %conv6
}
-define i32 @utesth_f16i32_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i32_mm:
+define i32 @utest_f16i32_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i32_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -946,8 +946,8 @@ entry:
ret i16 %conv6
}
-define i16 @utesth_f16i16_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i16_mm:
+define i16 @utest_f16i16_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i16_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __extendhfsf2@PLT
@@ -1131,8 +1131,8 @@ entry:
ret i64 %conv6
}
-define i64 @utesth_f16i64_mm(half %x) nounwind {
-; CHECK-LABEL: utesth_f16i64_mm:
+define i64 @utest_f16i64_mm(half %x) nounwind {
+; CHECK-LABEL: utest_f16i64_mm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq __fixunshfti@PLT
@@ -1170,6 +1170,27 @@ entry:
ret i64 %conv6
}
+; i32 non-saturating
+
+define i32 @ustest_f16i32_nsat(half %x) nounwind {
+; CHECK-LABEL: ustest_f16i32_nsat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq __extendhfsf2@PLT
+; CHECK-NEXT: cvttss2si %xmm0, %ecx
+; CHECK-NEXT: movl %ecx, %eax
+; CHECK-NEXT: sarl $31, %eax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: andl %ecx, %eax
+; CHECK-NEXT: cmovlel %edx, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+ %conv = fptosi half %x to i32
+ %spec.store.select = call i32 @llvm.smin.i32(i32 0, i32 %conv)
+ %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
+ ret i32 %spec.store.select7
+}
+
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
diff --git a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
index 1a2cfd6..991ce33 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
@@ -747,8 +747,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i32:
+define <4 x i32> @utest_f16i32(<4 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -835,7 +835,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i32:
+; AVX2-LABEL: utest_f16i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
@@ -893,7 +893,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i32:
+; AVX512-LABEL: utest_f16i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
@@ -1338,8 +1338,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i16:
+define <8 x i16> @utest_f16i16(<8 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
@@ -1436,7 +1436,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i16:
+; AVX2-LABEL: utest_f16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
@@ -1453,7 +1453,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i16:
+; AVX512-LABEL: utest_f16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
@@ -2456,8 +2456,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i64:
+define <2 x i64> @utest_f16i64(<2 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
@@ -2483,7 +2483,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i64:
+; AVX2-LABEL: utest_f16i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -2508,7 +2508,7 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i64:
+; AVX512-LABEL: utest_f16i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -3359,8 +3359,8 @@ entry:
ret <4 x i32> %conv6
}
-define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i32_mm:
+define <4 x i32> @utest_f16i32_mm(<4 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -3447,7 +3447,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i32_mm:
+; AVX2-LABEL: utest_f16i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
@@ -3505,7 +3505,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i32_mm:
+; AVX512-LABEL: utest_f16i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
@@ -3935,8 +3935,8 @@ entry:
ret <8 x i16> %conv6
}
-define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i16_mm:
+define <8 x i16> @utest_f16i16_mm(<8 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
@@ -4033,7 +4033,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i16_mm:
+; AVX2-LABEL: utest_f16i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
@@ -4050,7 +4050,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i16_mm:
+; AVX512-LABEL: utest_f16i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
@@ -4820,8 +4820,8 @@ entry:
ret <2 x i64> %conv6
}
-define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
-; SSE-LABEL: utesth_f16i64_mm:
+define <2 x i64> @utest_f16i64_mm(<2 x half> %x) nounwind {
+; SSE-LABEL: utest_f16i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
@@ -4847,7 +4847,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
-; AVX2-LABEL: utesth_f16i64_mm:
+; AVX2-LABEL: utest_f16i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4872,7 +4872,7 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
-; AVX512-LABEL: utesth_f16i64_mm:
+; AVX512-LABEL: utest_f16i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -4974,6 +4974,63 @@ entry:
ret <2 x i64> %conv6
}
+; i32 non-saturating
+
+define <4 x i32> @ustest_f16i32_nsat(<4 x half> %x) nounwind {
+; SSE-LABEL: ustest_f16i32_nsat:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: subq $72, %rsp
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $16, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE-NEXT: cvttps2dq %xmm0, %xmm0
+; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: cvttps2dq %xmm1, %xmm0
+; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: addq $72, %rsp
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ustest_f16i32_nsat:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %conv = fptosi <4 x half> %x to <4 x i32>
+ %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> zeroinitializer, <4 x i32> %conv)
+ %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
+ ret <4 x i32> %spec.store.select7
+}
+
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll
index cd16651..feac3dc 100644
--- a/llvm/test/CodeGen/X86/gfni-shifts.ll
+++ b/llvm/test/CodeGen/X86/gfni-shifts.ll
@@ -166,10 +166,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm3, %xmm4
; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm3, %xmm4
-; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; GFNIAVX1OR2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm3, %xmm4
-; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm2, %xmm2
; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1OR2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -177,10 +177,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm0, %xmm3
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; GFNIAVX1OR2-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm0, %xmm3
-; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -896,10 +896,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7
+; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -907,10 +907,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6
+; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
@@ -920,10 +920,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6
+; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -931,10 +931,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm4
-; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5
+; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm4
-; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vpsllw $2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
@@ -949,10 +949,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsraw $1, %ymm3, %ymm4
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -960,10 +960,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm3
-; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm3
-; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpsllw $2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -977,10 +977,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4
-; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4
-; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -988,10 +988,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3
-; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; GFNIAVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3
-; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -2027,10 +2027,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm7, %xmm8
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpsraw $2, %xmm7, %xmm8
-; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7
+; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm9
+; GFNIAVX1-NEXT: vpblendvb %xmm9, %xmm8, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpsraw $1, %xmm7, %xmm8
-; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2038,10 +2038,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
+; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8
+; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
@@ -2051,10 +2051,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6
+; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm8
+; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2062,10 +2062,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
+; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7
+; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0
@@ -2078,10 +2078,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
+; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8
+; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2089,10 +2089,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7
+; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
@@ -2102,10 +2102,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5
+; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm7
+; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2113,10 +2113,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX1-NEXT: vpsraw $4, %xmm1, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsraw $2, %xmm1, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
+; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6
+; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsraw $1, %xmm1, %xmm5
-; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
@@ -2131,10 +2131,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm5, %ymm6
; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
; GFNIAVX2-NEXT: vpsraw $2, %ymm5, %ymm6
-; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4
-; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm7
+; GFNIAVX2-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5
; GFNIAVX2-NEXT: vpsraw $1, %ymm5, %ymm6
-; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4
+; GFNIAVX2-NEXT: vpsllw $2, %ymm4, %ymm4
; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
; GFNIAVX2-NEXT: vpsrlw $8, %ymm4, %ymm4
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -2142,10 +2142,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm5
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm5
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm6
+; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm5
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
@@ -2155,10 +2155,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm4, %ymm5
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; GFNIAVX2-NEXT: vpsraw $2, %ymm4, %ymm5
-; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm6
+; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
; GFNIAVX2-NEXT: vpsraw $1, %ymm4, %ymm5
-; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; GFNIAVX2-NEXT: vpsllw $2, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -2166,10 +2166,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX2-NEXT: vpsraw $4, %ymm1, %ymm4
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsraw $2, %ymm1, %ymm4
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsraw $1, %ymm1, %ymm4
-; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
@@ -2185,10 +2185,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm5, %ymm6
; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm5, %ymm6
-; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
+; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm7
+; GFNIAVX512VL-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm5, %ymm6
-; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -2196,10 +2196,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5
-; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
+; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm6
+; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5
-; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
@@ -2209,10 +2209,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5
-; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm6
+; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5
-; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -2220,10 +2220,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm4
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
+; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm5
+; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm4
-; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
@@ -2239,11 +2239,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; GFNIAVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
+; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5
+; GFNIAVX512BW-NEXT: vpmovb2m %zmm5, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; GFNIAVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
+; GFNIAVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4
; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
@@ -2253,11 +2253,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4
+; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
diff --git a/llvm/test/CodeGen/X86/isel-fpclass.ll b/llvm/test/CodeGen/X86/isel-fpclass.ll
index 960bbf5..df04b67 100644
--- a/llvm/test/CodeGen/X86/isel-fpclass.ll
+++ b/llvm/test/CodeGen/X86/isel-fpclass.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86-SDAGISEL
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64,X64-SDAGISEL
; RUN: llc < %s -mtriple=i686-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X86-FASTISEL
; RUN: llc < %s -mtriple=x86_64-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-FASTISEL
+; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X64,X64-GISEL
-; FIXME: We can reuse/delete llvm/test/CodeGen/X86/is_fpclass.ll when all patches are included.
-
-define i1 @isnone_f(float %x) {
-; X86-SDAGISEL-LABEL: isnone_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: xorl %eax, %eax
-; X86-SDAGISEL-NEXT: retl
+define i1 @isnone_f(float %x) nounwind {
+; X86-LABEL: isnone_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
;
; X64-LABEL: isnone_f:
; X64: # %bb.0: # %entry
@@ -28,11 +28,11 @@ entry:
ret i1 %0
}
-define i1 @isany_f(float %x) {
-; X86-SDAGISEL-LABEL: isany_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movb $1, %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @isany_f(float %x) nounwind {
+; X86-LABEL: isany_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
;
; X64-LABEL: isany_f:
; X64: # %bb.0: # %entry
@@ -50,17 +50,17 @@ entry:
ret i1 %0
}
-define i1 @issignaling_f(float %x) {
-; X86-SDAGISEL-LABEL: issignaling_f:
-; X86-SDAGISEL: # %bb.0:
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-SDAGISEL-NEXT: setl %cl
-; X86-SDAGISEL-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001
-; X86-SDAGISEL-NEXT: setge %al
-; X86-SDAGISEL-NEXT: andb %cl, %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @issignaling_f(float %x) nounwind {
+; X86-LABEL: issignaling_f:
+; X86: # %bb.0:
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT: setl %cl
+; X86-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001
+; X86-NEXT: setge %al
+; X86-NEXT: andb %cl, %al
+; X86-NEXT: retl
;
; X64-LABEL: issignaling_f:
; X64: # %bb.0:
@@ -76,7 +76,6 @@ define i1 @issignaling_f(float %x) {
; X86-FASTISEL-LABEL: issignaling_f:
; X86-FASTISEL: # %bb.0:
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -87,20 +86,19 @@ define i1 @issignaling_f(float %x) {
; X86-FASTISEL-NEXT: setge %al
; X86-FASTISEL-NEXT: andb %cl, %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
%a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1) ; "snan"
ret i1 %a0
}
- define i1 @isquiet_f(float %x) {
-; X86-SDAGISEL-LABEL: isquiet_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-SDAGISEL-NEXT: setge %al
-; X86-SDAGISEL-NEXT: retl
+ define i1 @isquiet_f(float %x) nounwind {
+; X86-LABEL: isquiet_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT: setge %al
+; X86-NEXT: retl
;
; X64-LABEL: isquiet_f:
; X64: # %bb.0: # %entry
@@ -113,7 +111,6 @@ define i1 @issignaling_f(float %x) {
; X86-FASTISEL-LABEL: isquiet_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -121,21 +118,20 @@ define i1 @issignaling_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
; X86-FASTISEL-NEXT: setge %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2) ; "qnan"
ret i1 %0
}
-define i1 @not_isquiet_f(float %x) {
-; X86-SDAGISEL-LABEL: not_isquiet_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-SDAGISEL-NEXT: setl %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @not_isquiet_f(float %x) nounwind {
+; X86-LABEL: not_isquiet_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-NEXT: setl %al
+; X86-NEXT: retl
;
; X64-LABEL: not_isquiet_f:
; X64: # %bb.0: # %entry
@@ -148,7 +144,6 @@ define i1 @not_isquiet_f(float %x) {
; X86-FASTISEL-LABEL: not_isquiet_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -156,21 +151,20 @@ define i1 @not_isquiet_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
; X86-FASTISEL-NEXT: setl %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021) ; ~"qnan"
ret i1 %0
}
-define i1 @isinf_f(float %x) {
-; X86-SDAGISEL-LABEL: isinf_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: sete %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @isinf_f(float %x) nounwind {
+; X86-LABEL: isinf_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: isinf_f:
; X64: # %bb.0: # %entry
@@ -183,7 +177,6 @@ define i1 @isinf_f(float %x) {
; X86-FASTISEL-LABEL: isinf_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -191,21 +184,20 @@ define i1 @isinf_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516) ; 0x204 = "inf"
ret i1 %0
}
-define i1 @not_isinf_f(float %x) {
-; X86-SDAGISEL-LABEL: not_isinf_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: setne %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @not_isinf_f(float %x) nounwind {
+; X86-LABEL: not_isinf_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: not_isinf_f:
; X64: # %bb.0: # %entry
@@ -218,7 +210,6 @@ define i1 @not_isinf_f(float %x) {
; X86-FASTISEL-LABEL: not_isinf_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -226,19 +217,18 @@ define i1 @not_isinf_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
; X86-FASTISEL-NEXT: setne %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507) ; ~0x204 = "~inf"
ret i1 %0
}
-define i1 @is_plus_inf_f(float %x) {
-; X86-SDAGISEL-LABEL: is_plus_inf_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: sete %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @is_plus_inf_f(float %x) nounwind {
+; X86-LABEL: is_plus_inf_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: is_plus_inf_f:
; X64: # %bb.0: # %entry
@@ -250,25 +240,23 @@ define i1 @is_plus_inf_f(float %x) {
; X86-FASTISEL-LABEL: is_plus_inf_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512) ; 0x200 = "+inf"
ret i1 %0
}
-define i1 @is_minus_inf_f(float %x) {
-; X86-SDAGISEL-LABEL: is_minus_inf_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
-; X86-SDAGISEL-NEXT: sete %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @is_minus_inf_f(float %x) nounwind {
+; X86-LABEL: is_minus_inf_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: is_minus_inf_f:
; X64: # %bb.0: # %entry
@@ -280,25 +268,23 @@ define i1 @is_minus_inf_f(float %x) {
; X86-FASTISEL-LABEL: is_minus_inf_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4) ; "-inf"
ret i1 %0
}
-define i1 @not_is_minus_inf_f(float %x) {
-; X86-SDAGISEL-LABEL: not_is_minus_inf_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
-; X86-SDAGISEL-NEXT: setne %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @not_is_minus_inf_f(float %x) nounwind {
+; X86-LABEL: not_is_minus_inf_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: not_is_minus_inf_f:
; X64: # %bb.0: # %entry
@@ -310,27 +296,25 @@ define i1 @not_is_minus_inf_f(float %x) {
; X86-FASTISEL-LABEL: not_is_minus_inf_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000
; X86-FASTISEL-NEXT: setne %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019) ; ~"-inf"
ret i1 %0
}
-define i1 @isfinite_f(float %x) {
-; X86-SDAGISEL-LABEL: isfinite_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: setl %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @isfinite_f(float %x) nounwind {
+; X86-LABEL: isfinite_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT: setl %al
+; X86-NEXT: retl
;
; X64-LABEL: isfinite_f:
; X64: # %bb.0: # %entry
@@ -343,7 +327,6 @@ define i1 @isfinite_f(float %x) {
; X86-FASTISEL-LABEL: isfinite_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -351,21 +334,20 @@ define i1 @isfinite_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
; X86-FASTISEL-NEXT: setl %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504) ; 0x1f8 = "finite"
ret i1 %0
}
-define i1 @not_isfinite_f(float %x) {
-; X86-SDAGISEL-LABEL: not_isfinite_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: setge %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @not_isfinite_f(float %x) nounwind {
+; X86-LABEL: not_isfinite_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT: setge %al
+; X86-NEXT: retl
;
; X64-LABEL: not_isfinite_f:
; X64: # %bb.0: # %entry
@@ -378,7 +360,6 @@ define i1 @not_isfinite_f(float %x) {
; X86-FASTISEL-LABEL: not_isfinite_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
@@ -386,19 +367,18 @@ define i1 @not_isfinite_f(float %x) {
; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
; X86-FASTISEL-NEXT: setge %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519) ; ~0x1f8 = "~finite"
ret i1 %0
}
-define i1 @is_plus_finite_f(float %x) {
-; X86-SDAGISEL-LABEL: is_plus_finite_f:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
-; X86-SDAGISEL-NEXT: setb %al
-; X86-SDAGISEL-NEXT: retl
+define i1 @is_plus_finite_f(float %x) nounwind {
+; X86-LABEL: is_plus_finite_f:
+; X86: # %bb.0: # %entry
+; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT: setb %al
+; X86-NEXT: retl
;
; X64-LABEL: is_plus_finite_f:
; X64: # %bb.0: # %entry
@@ -410,13 +390,11 @@ define i1 @is_plus_finite_f(float %x) {
; X86-FASTISEL-LABEL: is_plus_finite_f:
; X86-FASTISEL: # %bb.0: # %entry
; X86-FASTISEL-NEXT: pushl %eax
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8
; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp)
; X86-FASTISEL-NEXT: fstps (%esp)
; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000
; X86-FASTISEL-NEXT: setb %al
; X86-FASTISEL-NEXT: popl %ecx
-; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4
; X86-FASTISEL-NEXT: retl
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448) ; 0x1c0 = "+finite"
@@ -424,10 +402,10 @@ entry:
}
define i1 @isnone_d(double %x) nounwind {
-; X86-SDAGISEL-LABEL: isnone_d:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: xorl %eax, %eax
-; X86-SDAGISEL-NEXT: retl
+; X86-LABEL: isnone_d:
+; X86: # %bb.0: # %entry
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
;
; X64-LABEL: isnone_d:
; X64: # %bb.0: # %entry
@@ -446,10 +424,10 @@ entry:
}
define i1 @isany_d(double %x) nounwind {
-; X86-SDAGISEL-LABEL: isany_d:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movb $1, %al
-; X86-SDAGISEL-NEXT: retl
+; X86-LABEL: isany_d:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
;
; X64-LABEL: isany_d:
; X64: # %bb.0: # %entry
@@ -468,10 +446,10 @@ entry:
}
define i1 @isnone_f80(x86_fp80 %x) nounwind {
-; X86-SDAGISEL-LABEL: isnone_f80:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: xorl %eax, %eax
-; X86-SDAGISEL-NEXT: retl
+; X86-LABEL: isnone_f80:
+; X86: # %bb.0: # %entry
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
;
; X64-SDAGISEL-LABEL: isnone_f80:
; X64-SDAGISEL: # %bb.0: # %entry
@@ -491,16 +469,21 @@ define i1 @isnone_f80(x86_fp80 %x) nounwind {
; X64-FASTISEL-NEXT: fstp %st(0)
; X64-FASTISEL-NEXT: xorl %eax, %eax
; X64-FASTISEL-NEXT: retq
+;
+; X64-GISEL-LABEL: isnone_f80:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %eax, %eax
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 0)
ret i1 %0
}
define i1 @isany_f80(x86_fp80 %x) nounwind {
-; X86-SDAGISEL-LABEL: isany_f80:
-; X86-SDAGISEL: # %bb.0: # %entry
-; X86-SDAGISEL-NEXT: movb $1, %al
-; X86-SDAGISEL-NEXT: retl
+; X86-LABEL: isany_f80:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
;
; X64-SDAGISEL-LABEL: isany_f80:
; X64-SDAGISEL: # %bb.0: # %entry
@@ -520,6 +503,11 @@ define i1 @isany_f80(x86_fp80 %x) nounwind {
; X64-FASTISEL-NEXT: fstp %st(0)
; X64-FASTISEL-NEXT: movb $1, %al
; X64-FASTISEL-NEXT: retq
+;
+; X64-GISEL-LABEL: isany_f80:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movb $1, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 1023)
ret i1 %0
diff --git a/llvm/test/CodeGen/X86/isel-smax.ll b/llvm/test/CodeGen/X86/isel-smax.ll
index 9c9a48e..1ce0a80 100644
--- a/llvm/test/CodeGen/X86/isel-smax.ll
+++ b/llvm/test/CodeGen/X86/isel-smax.ll
@@ -1,19 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone {
-; X64-LABEL: smax_i8:
-; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: cmpb %al, %dil
-; X64-NEXT: cmovgl %edi, %eax
-; X64-NEXT: # kill: def $al killed $al killed $eax
-; X64-NEXT: retq
+; DAG-X64-LABEL: smax_i8:
+; DAG-X64: # %bb.0:
+; DAG-X64-NEXT: movl %esi, %eax
+; DAG-X64-NEXT: cmpb %al, %dil
+; DAG-X64-NEXT: cmovgl %edi, %eax
+; DAG-X64-NEXT: # kill: def $al killed $al killed $eax
+; DAG-X64-NEXT: retq
;
; FASTISEL-X64-LABEL: smax_i8:
; FASTISEL-X64: # %bb.0:
@@ -24,6 +24,17 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone {
; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax
; FASTISEL-X64-NEXT: retq
;
+; GISEL-X64-LABEL: smax_i8:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %esi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpb %al, %dil
+; GISEL-X64-NEXT: setg %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovnew %di, %ax
+; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X64-NEXT: retq
+;
; X86-LABEL: smax_i8:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
@@ -35,16 +46,20 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone {
; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: smax_i8:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpb %cl, %al
-; FASTISEL-X86-NEXT: jg .LBB0_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB0_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: smax_i8:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpb %al, %cl
+; GISEL-X86-NEXT: setg %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB0_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB0_2:
+; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i8 @llvm.smax.i8(i8 %a, i8 %b)
ret i8 %ret
}
@@ -57,25 +72,28 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smax_i16:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpw %ax, %di
-; FASTISEL-X64-NEXT: cmovgl %edi, %eax
-; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smax_i16:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpw %si, %ax
+; GISEL-X64-NEXT: setg %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovew %si, %ax
+; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: smax_i16:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpw %cx, %ax
-; X86-NEXT: jg .LBB1_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: .LBB1_2:
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: retl
+; DAG-X86-LABEL: smax_i16:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: cmpw %cx, %ax
+; DAG-X86-NEXT: jg .LBB1_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: .LBB1_2:
+; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: smax_i16:
; FASTISEL-X86: # %bb.0:
@@ -88,6 +106,21 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone {
; FASTISEL-X86-NEXT: .LBB1_2:
; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: smax_i16:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpw %ax, %cx
+; GISEL-X86-NEXT: setg %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB1_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB1_2:
+; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i16 @llvm.smax.i16(i16 %a, i16 %b)
ret i16 %ret
}
@@ -99,12 +132,15 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: cmovgl %edi, %eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smax_i32:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpl %esi, %edi
-; FASTISEL-X64-NEXT: cmovgl %edi, %eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smax_i32:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpl %esi, %edi
+; GISEL-X64-NEXT: setg %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovel %esi, %eax
+; GISEL-X64-NEXT: retq
;
; X86-LABEL: smax_i32:
; X86: # %bb.0:
@@ -117,16 +153,19 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone {
; X86-NEXT: .LBB2_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: smax_i32:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpl %ecx, %eax
-; FASTISEL-X86-NEXT: jg .LBB2_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB2_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: smax_i32:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpl %eax, %ecx
+; GISEL-X86-NEXT: setg %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB2_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB2_2:
+; GISEL-X86-NEXT: retl
%ret = call i32 @llvm.smax.i32(i32 %a, i32 %b)
ret i32 %ret
}
@@ -138,32 +177,35 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone {
; X64-NEXT: cmovgq %rdi, %rax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smax_i64:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movq %rsi, %rax
-; FASTISEL-X64-NEXT: cmpq %rsi, %rdi
-; FASTISEL-X64-NEXT: cmovgq %rdi, %rax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smax_i64:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movq %rdi, %rax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpq %rsi, %rdi
+; GISEL-X64-NEXT: setg %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmoveq %rsi, %rax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: smax_i64:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmpl %eax, %ecx
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sbbl %edx, %edi
-; X86-NEXT: jl .LBB3_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: .LBB3_2:
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; DAG-X86-LABEL: smax_i64:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: pushl %edi
+; DAG-X86-NEXT: pushl %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; DAG-X86-NEXT: cmpl %eax, %ecx
+; DAG-X86-NEXT: movl %esi, %edi
+; DAG-X86-NEXT: sbbl %edx, %edi
+; DAG-X86-NEXT: jl .LBB3_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: movl %esi, %edx
+; DAG-X86-NEXT: .LBB3_2:
+; DAG-X86-NEXT: popl %esi
+; DAG-X86-NEXT: popl %edi
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: smax_i64:
; FASTISEL-X86: # %bb.0:
@@ -184,6 +226,44 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone {
; FASTISEL-X86-NEXT: popl %esi
; FASTISEL-X86-NEXT: popl %edi
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: smax_i64:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %ebp
+; GISEL-X86-NEXT: pushl %ebx
+; GISEL-X86-NEXT: pushl %edi
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: cmpl %eax, %esi
+; GISEL-X86-NEXT: seta %bl
+; GISEL-X86-NEXT: xorl %ecx, %ecx
+; GISEL-X86-NEXT: cmpl %edx, %ebp
+; GISEL-X86-NEXT: setg %bh
+; GISEL-X86-NEXT: sete %cl
+; GISEL-X86-NEXT: testl %ecx, %ecx
+; GISEL-X86-NEXT: je .LBB3_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movb %bl, %bh
+; GISEL-X86-NEXT: .LBB3_2:
+; GISEL-X86-NEXT: movzbl %bh, %edi
+; GISEL-X86-NEXT: andl $1, %edi
+; GISEL-X86-NEXT: je .LBB3_4
+; GISEL-X86-NEXT: # %bb.3:
+; GISEL-X86-NEXT: movl %esi, %eax
+; GISEL-X86-NEXT: .LBB3_4:
+; GISEL-X86-NEXT: testl %edi, %edi
+; GISEL-X86-NEXT: je .LBB3_6
+; GISEL-X86-NEXT: # %bb.5:
+; GISEL-X86-NEXT: movl %ebp, %edx
+; GISEL-X86-NEXT: .LBB3_6:
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: popl %edi
+; GISEL-X86-NEXT: popl %ebx
+; GISEL-X86-NEXT: popl %ebp
+; GISEL-X86-NEXT: retl
%ret = call i64 @llvm.smax.i64(i64 %a, i64 %b)
ret i64 %ret
}
diff --git a/llvm/test/CodeGen/X86/isel-smin.ll b/llvm/test/CodeGen/X86/isel-smin.ll
index 7349a7c..bbed3c3 100644
--- a/llvm/test/CodeGen/X86/isel-smin.ll
+++ b/llvm/test/CodeGen/X86/isel-smin.ll
@@ -1,19 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone {
-; X64-LABEL: smin_i8:
-; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: cmpb %al, %dil
-; X64-NEXT: cmovll %edi, %eax
-; X64-NEXT: # kill: def $al killed $al killed $eax
-; X64-NEXT: retq
+; DAG-X64-LABEL: smin_i8:
+; DAG-X64: # %bb.0:
+; DAG-X64-NEXT: movl %esi, %eax
+; DAG-X64-NEXT: cmpb %al, %dil
+; DAG-X64-NEXT: cmovll %edi, %eax
+; DAG-X64-NEXT: # kill: def $al killed $al killed $eax
+; DAG-X64-NEXT: retq
;
; FASTISEL-X64-LABEL: smin_i8:
; FASTISEL-X64: # %bb.0:
@@ -24,6 +24,17 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone {
; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax
; FASTISEL-X64-NEXT: retq
;
+; GISEL-X64-LABEL: smin_i8:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %esi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpb %al, %dil
+; GISEL-X64-NEXT: setl %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovnew %di, %ax
+; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X64-NEXT: retq
+;
; X86-LABEL: smin_i8:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
@@ -35,16 +46,20 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone {
; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: smin_i8:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpb %cl, %al
-; FASTISEL-X86-NEXT: jl .LBB0_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB0_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: smin_i8:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpb %al, %cl
+; GISEL-X86-NEXT: setl %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB0_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB0_2:
+; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i8 @llvm.smin.i8(i8 %a, i8 %b)
ret i8 %ret
}
@@ -57,25 +72,28 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smin_i16:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpw %ax, %di
-; FASTISEL-X64-NEXT: cmovll %edi, %eax
-; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smin_i16:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpw %si, %ax
+; GISEL-X64-NEXT: setl %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovew %si, %ax
+; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: smin_i16:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpw %cx, %ax
-; X86-NEXT: jl .LBB1_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: .LBB1_2:
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: retl
+; DAG-X86-LABEL: smin_i16:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: cmpw %cx, %ax
+; DAG-X86-NEXT: jl .LBB1_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: .LBB1_2:
+; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: smin_i16:
; FASTISEL-X86: # %bb.0:
@@ -88,6 +106,21 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone {
; FASTISEL-X86-NEXT: .LBB1_2:
; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: smin_i16:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpw %ax, %cx
+; GISEL-X86-NEXT: setl %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB1_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB1_2:
+; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i16 @llvm.smin.i16(i16 %a, i16 %b)
ret i16 %ret
}
@@ -99,12 +132,15 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: cmovll %edi, %eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smin_i32:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpl %esi, %edi
-; FASTISEL-X64-NEXT: cmovll %edi, %eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smin_i32:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpl %esi, %edi
+; GISEL-X64-NEXT: setl %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovel %esi, %eax
+; GISEL-X64-NEXT: retq
;
; X86-LABEL: smin_i32:
; X86: # %bb.0:
@@ -117,16 +153,19 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone {
; X86-NEXT: .LBB2_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: smin_i32:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpl %ecx, %eax
-; FASTISEL-X86-NEXT: jl .LBB2_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB2_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: smin_i32:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpl %eax, %ecx
+; GISEL-X86-NEXT: setl %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB2_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB2_2:
+; GISEL-X86-NEXT: retl
%ret = call i32 @llvm.smin.i32(i32 %a, i32 %b)
ret i32 %ret
}
@@ -138,32 +177,35 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone {
; X64-NEXT: cmovlq %rdi, %rax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: smin_i64:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movq %rsi, %rax
-; FASTISEL-X64-NEXT: cmpq %rsi, %rdi
-; FASTISEL-X64-NEXT: cmovlq %rdi, %rax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: smin_i64:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movq %rdi, %rax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpq %rsi, %rdi
+; GISEL-X64-NEXT: setl %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmoveq %rsi, %rax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: smin_i64:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: movl %edx, %edi
-; X86-NEXT: sbbl %esi, %edi
-; X86-NEXT: jl .LBB3_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: .LBB3_2:
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; DAG-X86-LABEL: smin_i64:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: pushl %edi
+; DAG-X86-NEXT: pushl %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; DAG-X86-NEXT: cmpl %ecx, %eax
+; DAG-X86-NEXT: movl %edx, %edi
+; DAG-X86-NEXT: sbbl %esi, %edi
+; DAG-X86-NEXT: jl .LBB3_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: movl %esi, %edx
+; DAG-X86-NEXT: .LBB3_2:
+; DAG-X86-NEXT: popl %esi
+; DAG-X86-NEXT: popl %edi
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: smin_i64:
; FASTISEL-X86: # %bb.0:
@@ -184,6 +226,44 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone {
; FASTISEL-X86-NEXT: popl %esi
; FASTISEL-X86-NEXT: popl %edi
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: smin_i64:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %ebp
+; GISEL-X86-NEXT: pushl %ebx
+; GISEL-X86-NEXT: pushl %edi
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: cmpl %eax, %esi
+; GISEL-X86-NEXT: setb %bl
+; GISEL-X86-NEXT: xorl %ecx, %ecx
+; GISEL-X86-NEXT: cmpl %edx, %ebp
+; GISEL-X86-NEXT: setl %bh
+; GISEL-X86-NEXT: sete %cl
+; GISEL-X86-NEXT: testl %ecx, %ecx
+; GISEL-X86-NEXT: je .LBB3_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movb %bl, %bh
+; GISEL-X86-NEXT: .LBB3_2:
+; GISEL-X86-NEXT: movzbl %bh, %edi
+; GISEL-X86-NEXT: andl $1, %edi
+; GISEL-X86-NEXT: je .LBB3_4
+; GISEL-X86-NEXT: # %bb.3:
+; GISEL-X86-NEXT: movl %esi, %eax
+; GISEL-X86-NEXT: .LBB3_4:
+; GISEL-X86-NEXT: testl %edi, %edi
+; GISEL-X86-NEXT: je .LBB3_6
+; GISEL-X86-NEXT: # %bb.5:
+; GISEL-X86-NEXT: movl %ebp, %edx
+; GISEL-X86-NEXT: .LBB3_6:
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: popl %edi
+; GISEL-X86-NEXT: popl %ebx
+; GISEL-X86-NEXT: popl %ebp
+; GISEL-X86-NEXT: retl
%ret = call i64 @llvm.smin.i64(i64 %a, i64 %b)
ret i64 %ret
}
diff --git a/llvm/test/CodeGen/X86/isel-umax.ll b/llvm/test/CodeGen/X86/isel-umax.ll
index a90456c..990af26 100644
--- a/llvm/test/CodeGen/X86/isel-umax.ll
+++ b/llvm/test/CodeGen/X86/isel-umax.ll
@@ -1,19 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone {
-; X64-LABEL: umax_i8:
-; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: cmpb %al, %dil
-; X64-NEXT: cmoval %edi, %eax
-; X64-NEXT: # kill: def $al killed $al killed $eax
-; X64-NEXT: retq
+; DAG-X64-LABEL: umax_i8:
+; DAG-X64: # %bb.0:
+; DAG-X64-NEXT: movl %esi, %eax
+; DAG-X64-NEXT: cmpb %al, %dil
+; DAG-X64-NEXT: cmoval %edi, %eax
+; DAG-X64-NEXT: # kill: def $al killed $al killed $eax
+; DAG-X64-NEXT: retq
;
; FASTISEL-X64-LABEL: umax_i8:
; FASTISEL-X64: # %bb.0:
@@ -24,6 +24,17 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone {
; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax
; FASTISEL-X64-NEXT: retq
;
+; GISEL-X64-LABEL: umax_i8:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %esi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpb %al, %dil
+; GISEL-X64-NEXT: seta %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovnew %di, %ax
+; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X64-NEXT: retq
+;
; X86-LABEL: umax_i8:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
@@ -35,16 +46,20 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone {
; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: umax_i8:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpb %cl, %al
-; FASTISEL-X86-NEXT: ja .LBB0_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB0_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: umax_i8:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpb %al, %cl
+; GISEL-X86-NEXT: seta %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB0_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB0_2:
+; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i8 @llvm.umax.i8(i8 %a, i8 %b)
ret i8 %ret
}
@@ -57,25 +72,28 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umax_i16:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpw %ax, %di
-; FASTISEL-X64-NEXT: cmoval %edi, %eax
-; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umax_i16:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpw %si, %ax
+; GISEL-X64-NEXT: seta %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovew %si, %ax
+; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: umax_i16:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpw %cx, %ax
-; X86-NEXT: ja .LBB1_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: .LBB1_2:
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: retl
+; DAG-X86-LABEL: umax_i16:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: cmpw %cx, %ax
+; DAG-X86-NEXT: ja .LBB1_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: .LBB1_2:
+; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: umax_i16:
; FASTISEL-X86: # %bb.0:
@@ -88,6 +106,21 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone {
; FASTISEL-X86-NEXT: .LBB1_2:
; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: umax_i16:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpw %ax, %cx
+; GISEL-X86-NEXT: seta %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB1_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB1_2:
+; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i16 @llvm.umax.i16(i16 %a, i16 %b)
ret i16 %ret
}
@@ -99,12 +132,15 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: cmoval %edi, %eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umax_i32:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpl %esi, %edi
-; FASTISEL-X64-NEXT: cmoval %edi, %eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umax_i32:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpl %esi, %edi
+; GISEL-X64-NEXT: seta %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovel %esi, %eax
+; GISEL-X64-NEXT: retq
;
; X86-LABEL: umax_i32:
; X86: # %bb.0:
@@ -117,16 +153,19 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone {
; X86-NEXT: .LBB2_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: umax_i32:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpl %ecx, %eax
-; FASTISEL-X86-NEXT: ja .LBB2_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB2_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: umax_i32:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpl %eax, %ecx
+; GISEL-X86-NEXT: seta %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB2_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB2_2:
+; GISEL-X86-NEXT: retl
%ret = call i32 @llvm.umax.i32(i32 %a, i32 %b)
ret i32 %ret
}
@@ -138,32 +177,35 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone {
; X64-NEXT: cmovaq %rdi, %rax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umax_i64:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movq %rsi, %rax
-; FASTISEL-X64-NEXT: cmpq %rsi, %rdi
-; FASTISEL-X64-NEXT: cmovaq %rdi, %rax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umax_i64:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movq %rdi, %rax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpq %rsi, %rdi
+; GISEL-X64-NEXT: seta %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmoveq %rsi, %rax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: umax_i64:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmpl %eax, %ecx
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sbbl %edx, %edi
-; X86-NEXT: jb .LBB3_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: .LBB3_2:
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; DAG-X86-LABEL: umax_i64:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: pushl %edi
+; DAG-X86-NEXT: pushl %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; DAG-X86-NEXT: cmpl %eax, %ecx
+; DAG-X86-NEXT: movl %esi, %edi
+; DAG-X86-NEXT: sbbl %edx, %edi
+; DAG-X86-NEXT: jb .LBB3_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: movl %esi, %edx
+; DAG-X86-NEXT: .LBB3_2:
+; DAG-X86-NEXT: popl %esi
+; DAG-X86-NEXT: popl %edi
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: umax_i64:
; FASTISEL-X86: # %bb.0:
@@ -184,6 +226,44 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone {
; FASTISEL-X86-NEXT: popl %esi
; FASTISEL-X86-NEXT: popl %edi
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: umax_i64:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %ebp
+; GISEL-X86-NEXT: pushl %ebx
+; GISEL-X86-NEXT: pushl %edi
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: cmpl %eax, %esi
+; GISEL-X86-NEXT: seta %bl
+; GISEL-X86-NEXT: xorl %ecx, %ecx
+; GISEL-X86-NEXT: cmpl %edx, %ebp
+; GISEL-X86-NEXT: seta %bh
+; GISEL-X86-NEXT: sete %cl
+; GISEL-X86-NEXT: testl %ecx, %ecx
+; GISEL-X86-NEXT: je .LBB3_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movb %bl, %bh
+; GISEL-X86-NEXT: .LBB3_2:
+; GISEL-X86-NEXT: movzbl %bh, %edi
+; GISEL-X86-NEXT: andl $1, %edi
+; GISEL-X86-NEXT: je .LBB3_4
+; GISEL-X86-NEXT: # %bb.3:
+; GISEL-X86-NEXT: movl %esi, %eax
+; GISEL-X86-NEXT: .LBB3_4:
+; GISEL-X86-NEXT: testl %edi, %edi
+; GISEL-X86-NEXT: je .LBB3_6
+; GISEL-X86-NEXT: # %bb.5:
+; GISEL-X86-NEXT: movl %ebp, %edx
+; GISEL-X86-NEXT: .LBB3_6:
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: popl %edi
+; GISEL-X86-NEXT: popl %ebx
+; GISEL-X86-NEXT: popl %ebp
+; GISEL-X86-NEXT: retl
%ret = call i64 @llvm.umax.i64(i64 %a, i64 %b)
ret i64 %ret
}
diff --git a/llvm/test/CodeGen/X86/isel-umin.ll b/llvm/test/CodeGen/X86/isel-umin.ll
index 53a0b27..1710b9f 100644
--- a/llvm/test/CodeGen/X86/isel-umin.ll
+++ b/llvm/test/CodeGen/X86/isel-umin.ll
@@ -1,19 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone {
-; X64-LABEL: umin_i8:
-; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: cmpb %al, %dil
-; X64-NEXT: cmovbl %edi, %eax
-; X64-NEXT: # kill: def $al killed $al killed $eax
-; X64-NEXT: retq
+; DAG-X64-LABEL: umin_i8:
+; DAG-X64: # %bb.0:
+; DAG-X64-NEXT: movl %esi, %eax
+; DAG-X64-NEXT: cmpb %al, %dil
+; DAG-X64-NEXT: cmovbl %edi, %eax
+; DAG-X64-NEXT: # kill: def $al killed $al killed $eax
+; DAG-X64-NEXT: retq
;
; FASTISEL-X64-LABEL: umin_i8:
; FASTISEL-X64: # %bb.0:
@@ -24,6 +24,17 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone {
; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax
; FASTISEL-X64-NEXT: retq
;
+; GISEL-X64-LABEL: umin_i8:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %esi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpb %al, %dil
+; GISEL-X64-NEXT: setb %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovnew %di, %ax
+; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X64-NEXT: retq
+;
; X86-LABEL: umin_i8:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
@@ -35,16 +46,20 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone {
; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: umin_i8:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpb %cl, %al
-; FASTISEL-X86-NEXT: jb .LBB0_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB0_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: umin_i8:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpb %al, %cl
+; GISEL-X86-NEXT: setb %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB0_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB0_2:
+; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i8 @llvm.umin.i8(i8 %a, i8 %b)
ret i8 %ret
}
@@ -57,25 +72,28 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umin_i16:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpw %ax, %di
-; FASTISEL-X64-NEXT: cmovbl %edi, %eax
-; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umin_i16:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpw %si, %ax
+; GISEL-X64-NEXT: setb %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovew %si, %ax
+; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: umin_i16:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpw %cx, %ax
-; X86-NEXT: jb .LBB1_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: .LBB1_2:
-; X86-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NEXT: retl
+; DAG-X86-LABEL: umin_i16:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: cmpw %cx, %ax
+; DAG-X86-NEXT: jb .LBB1_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: .LBB1_2:
+; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: umin_i16:
; FASTISEL-X86: # %bb.0:
@@ -88,6 +106,21 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone {
; FASTISEL-X86-NEXT: .LBB1_2:
; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: umin_i16:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpw %ax, %cx
+; GISEL-X86-NEXT: setb %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB1_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB1_2:
+; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax
+; GISEL-X86-NEXT: retl
%ret = call i16 @llvm.umin.i16(i16 %a, i16 %b)
ret i16 %ret
}
@@ -99,12 +132,15 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: cmovbl %edi, %eax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umin_i32:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movl %esi, %eax
-; FASTISEL-X64-NEXT: cmpl %esi, %edi
-; FASTISEL-X64-NEXT: cmovbl %edi, %eax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umin_i32:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movl %edi, %eax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpl %esi, %edi
+; GISEL-X64-NEXT: setb %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmovel %esi, %eax
+; GISEL-X64-NEXT: retq
;
; X86-LABEL: umin_i32:
; X86: # %bb.0:
@@ -117,16 +153,19 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone {
; X86-NEXT: .LBB2_2:
; X86-NEXT: retl
;
-; FASTISEL-X86-LABEL: umin_i32:
-; FASTISEL-X86: # %bb.0:
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FASTISEL-X86-NEXT: cmpl %ecx, %eax
-; FASTISEL-X86-NEXT: jb .LBB2_2
-; FASTISEL-X86-NEXT: # %bb.1:
-; FASTISEL-X86-NEXT: movl %ecx, %eax
-; FASTISEL-X86-NEXT: .LBB2_2:
-; FASTISEL-X86-NEXT: retl
+; GISEL-X86-LABEL: umin_i32:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: xorl %edx, %edx
+; GISEL-X86-NEXT: cmpl %eax, %ecx
+; GISEL-X86-NEXT: setb %dl
+; GISEL-X86-NEXT: andl $1, %edx
+; GISEL-X86-NEXT: je .LBB2_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movl %ecx, %eax
+; GISEL-X86-NEXT: .LBB2_2:
+; GISEL-X86-NEXT: retl
%ret = call i32 @llvm.umin.i32(i32 %a, i32 %b)
ret i32 %ret
}
@@ -138,32 +177,35 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone {
; X64-NEXT: cmovbq %rdi, %rax
; X64-NEXT: retq
;
-; FASTISEL-X64-LABEL: umin_i64:
-; FASTISEL-X64: # %bb.0:
-; FASTISEL-X64-NEXT: movq %rsi, %rax
-; FASTISEL-X64-NEXT: cmpq %rsi, %rdi
-; FASTISEL-X64-NEXT: cmovbq %rdi, %rax
-; FASTISEL-X64-NEXT: retq
+; GISEL-X64-LABEL: umin_i64:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: movq %rdi, %rax
+; GISEL-X64-NEXT: xorl %ecx, %ecx
+; GISEL-X64-NEXT: cmpq %rsi, %rdi
+; GISEL-X64-NEXT: setb %cl
+; GISEL-X64-NEXT: andl $1, %ecx
+; GISEL-X64-NEXT: cmoveq %rsi, %rax
+; GISEL-X64-NEXT: retq
;
-; X86-LABEL: umin_i64:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: movl %edx, %edi
-; X86-NEXT: sbbl %esi, %edi
-; X86-NEXT: jb .LBB3_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: .LBB3_2:
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; DAG-X86-LABEL: umin_i64:
+; DAG-X86: # %bb.0:
+; DAG-X86-NEXT: pushl %edi
+; DAG-X86-NEXT: pushl %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; DAG-X86-NEXT: cmpl %ecx, %eax
+; DAG-X86-NEXT: movl %edx, %edi
+; DAG-X86-NEXT: sbbl %esi, %edi
+; DAG-X86-NEXT: jb .LBB3_2
+; DAG-X86-NEXT: # %bb.1:
+; DAG-X86-NEXT: movl %ecx, %eax
+; DAG-X86-NEXT: movl %esi, %edx
+; DAG-X86-NEXT: .LBB3_2:
+; DAG-X86-NEXT: popl %esi
+; DAG-X86-NEXT: popl %edi
+; DAG-X86-NEXT: retl
;
; FASTISEL-X86-LABEL: umin_i64:
; FASTISEL-X86: # %bb.0:
@@ -184,6 +226,44 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone {
; FASTISEL-X86-NEXT: popl %esi
; FASTISEL-X86-NEXT: popl %edi
; FASTISEL-X86-NEXT: retl
+;
+; GISEL-X86-LABEL: umin_i64:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %ebp
+; GISEL-X86-NEXT: pushl %ebx
+; GISEL-X86-NEXT: pushl %edi
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: cmpl %eax, %esi
+; GISEL-X86-NEXT: setb %bl
+; GISEL-X86-NEXT: xorl %ecx, %ecx
+; GISEL-X86-NEXT: cmpl %edx, %ebp
+; GISEL-X86-NEXT: setb %bh
+; GISEL-X86-NEXT: sete %cl
+; GISEL-X86-NEXT: testl %ecx, %ecx
+; GISEL-X86-NEXT: je .LBB3_2
+; GISEL-X86-NEXT: # %bb.1:
+; GISEL-X86-NEXT: movb %bl, %bh
+; GISEL-X86-NEXT: .LBB3_2:
+; GISEL-X86-NEXT: movzbl %bh, %edi
+; GISEL-X86-NEXT: andl $1, %edi
+; GISEL-X86-NEXT: je .LBB3_4
+; GISEL-X86-NEXT: # %bb.3:
+; GISEL-X86-NEXT: movl %esi, %eax
+; GISEL-X86-NEXT: .LBB3_4:
+; GISEL-X86-NEXT: testl %edi, %edi
+; GISEL-X86-NEXT: je .LBB3_6
+; GISEL-X86-NEXT: # %bb.5:
+; GISEL-X86-NEXT: movl %ebp, %edx
+; GISEL-X86-NEXT: .LBB3_6:
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: popl %edi
+; GISEL-X86-NEXT: popl %ebx
+; GISEL-X86-NEXT: popl %ebp
+; GISEL-X86-NEXT: retl
%ret = call i64 @llvm.umin.i64(i64 %a, i64 %b)
ret i64 %ret
}
diff --git a/llvm/test/CodeGen/X86/logic-shift.ll b/llvm/test/CodeGen/X86/logic-shift.ll
index 96e63d1..104151c 100644
--- a/llvm/test/CodeGen/X86/logic-shift.ll
+++ b/llvm/test/CodeGen/X86/logic-shift.ll
@@ -129,10 +129,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, <
; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
+; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -140,10 +140,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, <
; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
@@ -413,10 +413,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y,
; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
+; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -424,10 +424,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y,
; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
@@ -697,10 +697,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y,
; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
+; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5
-; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -708,10 +708,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y,
; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4
-; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2
; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/pr161693.ll b/llvm/test/CodeGen/X86/pr161693.ll
new file mode 100644
index 0000000..de8188f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr161693.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+
+define void @PR161693() #0 {
+; CHECK-LABEL: PR161693:
+; CHECK: # %bb.0: # %start
+; CHECK-NEXT: movzbl (%rax), %eax
+; CHECK-NEXT: andb $-33, %al
+; CHECK-NEXT: addb $-71, %al
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_1: # %loop
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpb $-6, %al
+; CHECK-NEXT: setb %cl
+; CHECK-NEXT: leal (%rcx,%rcx), %edx
+; CHECK-NEXT: orb %cl, %dl
+; CHECK-NEXT: leal (,%rdx,4), %ecx
+; CHECK-NEXT: orb %dl, %cl
+; CHECK-NEXT: je .LBB0_1
+; CHECK-NEXT: # %bb.2: # %exit
+; CHECK-NEXT: retq
+start:
+ br label %loop
+
+loop:
+ %.val.i.i89 = load <16 x i8>, ptr poison, align 1
+ %.not49.i = icmp ult <16 x i8> zeroinitializer, splat (i8 -10)
+ %i = and <16 x i8> %.val.i.i89, splat (i8 -33)
+ %i1 = add <16 x i8> %i, splat (i8 -71)
+ %.not51.i = icmp ult <16 x i8> %i1, splat (i8 -6)
+ %.not46.i = and <16 x i1> %.not49.i, %.not51.i
+ %i2 = bitcast <16 x i1> %.not46.i to i16
+ %_0.i = icmp eq i16 %i2, 0
+ br i1 %_0.i, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+attributes #0 = { "target-features"="+soft-float" }
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
index bf04c8d..63bbac12 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
@@ -302,10 +302,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX256-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX256-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; AVX256-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; AVX256-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX256-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX256-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -313,10 +313,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX256-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX256-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; AVX256-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; AVX256-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX256-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX256-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX256-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -338,10 +338,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -349,10 +349,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -432,10 +432,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX256VL-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX256VL-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX256VL-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; AVX256VL-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX256VL-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -443,10 +443,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX256VL-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX256VL-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; AVX256VL-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX256VL-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX256VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll
index 9c8729b3..4b8f78d 100644
--- a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll
+++ b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll
@@ -15,20 +15,20 @@ define <4 x i32> @shuf_rot_v4i32_1032(<4 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_rot_v4i32_1032:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
+; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_rot_v4i32_1032:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
+; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_rot_v4i32_1032:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
+; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <4 x i32> %x, %x
%r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -44,20 +44,20 @@ define <8 x i32> @shuf_rot_v8i32_10325476(<8 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_rot_v8i32_10325476:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
+; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_rot_v8i32_10325476:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
+; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_rot_v8i32_10325476:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
+; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <8 x i32> %x, %x
%r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -73,20 +73,20 @@ define <16 x i32> @shuf_rot_v16i32_1032547698111013121514(<16 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_rot_v16i32_1032547698111013121514:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_rot_v16i32_1032547698111013121514:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_rot_v16i32_1032547698111013121514:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <16 x i32> %x, %x
%r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
@@ -168,20 +168,20 @@ define <4 x i32> @shuf_shr_v4i32_1U3U(<4 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shr_v4i32_1U3U:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shr_v4i32_1U3U:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shr_v4i32_1U3U:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <4 x i32> %x, %x
%r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
@@ -197,20 +197,20 @@ define <8 x i32> @shuf_shr_v8i32_1U3U5U7U(<8 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shr_v8i32_1U3U5U7U:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shr_v8i32_1U3U5U7U:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shr_v8i32_1U3U5U7U:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <8 x i32> %x, %x
%r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef>
@@ -226,20 +226,20 @@ define <16 x i32> @shuf_shr_v16i32_U3U5U7U9U11U13U15(<16 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <16 x i32> %x, %x
%r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef, i32 9, i32 undef, i32 11, i32 undef, i32 13, i32 undef, i32 15, i32 undef>
@@ -288,20 +288,20 @@ define <4 x i32> @shuf_shl_v4i32_U0U2(<4 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shl_v4i32_U0U2:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shl_v4i32_U0U2:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shl_v4i32_U0U2:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <4 x i32> %x, %x
%r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2>
@@ -317,20 +317,20 @@ define <8 x i32> @shuf_shl_v8i32_U0U2U4U6(<8 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shl_v8i32_U0U2U4U6:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shl_v8i32_U0U2U4U6:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shl_v8i32_U0U2U4U6:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <8 x i32> %x, %x
%r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6>
@@ -346,20 +346,20 @@ define <16 x i32> @shuf_shl_v16i32_U0U2U4U6U8U10U12U14(<16 x i32> %x) {
;
; CHECK-ICX-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14:
; CHECK-ICX: # %bb.0:
-; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ICX-NEXT: retq
;
; CHECK-V4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14:
; CHECK-V4: # %bb.0:
-; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-V4-NEXT: retq
;
; CHECK-ZNVER4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14:
; CHECK-ZNVER4: # %bb.0:
-; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; CHECK-ZNVER4-NEXT: retq
%x1 = add <16 x i32> %x, %x
%r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 undef, i32 12, i32 undef, i32 14>
diff --git a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
index ecd9435..1766b4d 100644
--- a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
+++ b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
@@ -58,7 +58,7 @@ define <8 x float> @foo8(<8 x float> %v, ptr%p) nounwind {
define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -68,7 +68,7 @@ define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask2:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -78,7 +78,7 @@ define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask3:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
%res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
@@ -88,7 +88,7 @@ define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask4(<4 x i32> %v, ptr %p) nounwind {
; AVX2-LABEL: undef_splatmask4:
; AVX2: # %bb.0:
-; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,2,2]
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rdi)
; AVX2-NEXT: vmovaps %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
index f91758b..10dee14 100644
--- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
@@ -602,10 +602,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; X64-AVX2-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; X64-AVX2-NEXT: vpsraw $2, %xmm3, %xmm4
-; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm6
+; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
; X64-AVX2-NEXT: vpsraw $1, %xmm3, %xmm4
-; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5
+; X64-AVX2-NEXT: vpsllw $2, %xmm5, %xmm5
; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; X64-AVX2-NEXT: vpsrlw $8, %xmm3, %xmm3
; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -613,10 +613,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4
; X64-AVX2-NEXT: vpsraw $2, %xmm4, %xmm5
-; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4
+; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm6
+; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; X64-AVX2-NEXT: vpsraw $1, %xmm4, %xmm5
-; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpsllw $2, %xmm1, %xmm1
; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm1
; X64-AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; X64-AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
index 209d6a5..93a692c 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
@@ -1911,13 +1911,13 @@ define <2 x i64> @test_v2f64_ogt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmoval %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmoval %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2031,13 +2031,13 @@ define <2 x i64> @test_v2f64_oge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovael %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovael %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2151,13 +2151,13 @@ define <2 x i64> @test_v2f64_olt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmoval %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmoval %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2269,13 +2269,13 @@ define <2 x i64> @test_v2f64_ole_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovael %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovael %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2680,13 +2680,13 @@ define <2 x i64> @test_v2f64_ugt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbl %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovbl %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2798,13 +2798,13 @@ define <2 x i64> @test_v2f64_uge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbel %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: ucomisd %xmm2, %xmm4
; SSE-32-NEXT: cmovbel %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -2916,13 +2916,13 @@ define <2 x i64> @test_v2f64_ult_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbl %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovbl %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
@@ -3036,13 +3036,13 @@ define <2 x i64> @test_v2f64_ule_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1,
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmovbel %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
-; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-32-NEXT: ucomisd %xmm4, %xmm2
; SSE-32-NEXT: cmovbel %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
-; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index 9ecc629..b378dce 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -162,7 +162,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -182,7 +182,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -200,7 +200,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -277,7 +277,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
@@ -289,7 +289,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
index 322ebe2..06ff7e7 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll
@@ -250,7 +250,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt)
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
@@ -286,7 +286,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31]
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: pandn %xmm3, %xmm4
@@ -316,7 +316,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -423,7 +423,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; XOPAVX1-NEXT: vpshld %xmm4, %xmm0, %xmm0
@@ -450,7 +450,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pandn %xmm4, %xmm5
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index 20be579..9b52857 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -536,14 +536,14 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm6
; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm4
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6
+; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm5
+; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm5
+; AVX1-NEXT: vpsllw $3, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index 1f16463..a387562 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -328,15 +328,15 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm8
-; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5
-; AVX1-NEXT: vpsrlw $4, %xmm5, %xmm7
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlw $2, %xmm5, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7
+; AVX1-NEXT: vpsrlw $4, %xmm7, %xmm8
+; AVX1-NEXT: vpblendvb %xmm6, %xmm8, %xmm7, %xmm6
+; AVX1-NEXT: vpsrlw $2, %xmm6, %xmm7
+; AVX1-NEXT: vpsllw $2, %xmm5, %xmm8
+; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm7
+; AVX1-NEXT: vpsllw $3, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm6
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm6[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpslld $23, %xmm4, %xmm7
@@ -358,14 +358,14 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm8
; AVX1-NEXT: vpblendvb %xmm6, %xmm8, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6
-; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm6
-; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm6
-; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm8
+; AVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm7
+; AVX1-NEXT: vpsllw $2, %xmm6, %xmm8
+; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm7
+; AVX1-NEXT: vpsllw $3, %xmm6, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index 178c02f..ef5ffe4 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -172,7 +172,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm2
; SSE2-NEXT: pslld $23, %xmm2
@@ -194,7 +194,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm1, %xmm2
; SSE41-NEXT: pslld $23, %xmm2
@@ -214,7 +214,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
@@ -293,7 +293,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
@@ -309,7 +309,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubd %xmm1, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
index 372deb05..2d8670a 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll
@@ -251,7 +251,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt)
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
@@ -287,7 +287,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31]
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: pand %xmm3, %xmm4
@@ -317,7 +317,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -425,7 +425,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; XOPAVX1-LABEL: splatvar_funnnel_v2i32:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31]
; XOPAVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
; XOPAVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
@@ -452,7 +452,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pand %xmm4, %xmm5
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index f57efb4..1e11ea9 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1409,11 +1409,11 @@ define <2 x i64> @load_sext_2i1_to_2i64(ptr%ptr) {
; X86-SSE2-NEXT: movzbl %al, %eax
; X86-SSE2-NEXT: negl %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; X86-SSE2-NEXT: andl $1, %ecx
; X86-SSE2-NEXT: negl %ecx
; X86-SSE2-NEXT: movd %ecx, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE2-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 02f0f53..d565ef0 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -293,14 +293,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
@@ -494,10 +494,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -505,10 +505,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index 15855e3..249bcba 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -237,29 +237,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -339,29 +339,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; X86-AVX1-NEXT: vpsraw $8, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; X86-AVX1-NEXT: vpsraw $4, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X86-AVX1-NEXT: vpsraw $2, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X86-AVX1-NEXT: vpsraw $1, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
+; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; X86-AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
+; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
+; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
+; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsraw $8, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
+; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm3
+; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
+; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
+; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
@@ -393,10 +393,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7
+; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -404,10 +404,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
@@ -417,10 +417,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -428,10 +428,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm4
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm4
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
@@ -446,10 +446,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -457,10 +457,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -498,10 +498,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -509,10 +509,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -534,10 +534,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4
; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX512DQVL-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; AVX512DQVL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; AVX512DQVL-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX512DQVL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -545,10 +545,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: vpsraw $4, %ymm0, %ymm3
; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; AVX512DQVL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQVL-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
@@ -572,10 +572,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; X86-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
+; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7
+; X86-AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5
; X86-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -583,10 +583,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6
+; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm2
; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
@@ -596,10 +596,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
+; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6
+; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3
; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -607,10 +607,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5
+; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
@@ -625,10 +625,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; X86-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5
+; X86-AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
; X86-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
-; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpsllw $2, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -636,10 +636,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
-; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4
+; X86-AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
-; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; X86-AVX2-NEXT: vpsllw $2, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
index ea0745b..0fb0420 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -59,10 +59,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6
-; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6
-; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -70,10 +70,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2
; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
@@ -83,10 +83,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -94,10 +94,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm4
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
@@ -113,11 +113,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
+; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5
+; AVX512BW-NEXT: vpmovb2m %zmm5, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4
; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
@@ -127,11 +127,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4
+; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
index f7de8d4..c5d3297 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
@@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i16:
@@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i16:
@@ -568,10 +568,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -579,10 +579,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -796,10 +796,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -807,10 +807,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -1024,10 +1024,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5
+; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsllw $2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1035,10 +1035,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4
+; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 1d1697a..8cb2c7b 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -262,14 +262,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 3a4bb22..606adb4 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -198,29 +198,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5
+; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -300,29 +300,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; X86-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; X86-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X86-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X86-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
+; X86-AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5
+; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
+; X86-AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4
+; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
+; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4
+; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4
; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
+; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
+; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
+; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
index 7928111..57874c4 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
@@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i16:
@@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i16:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
index dbbfaab..be41945 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -8079,14 +8079,14 @@ define <16 x i16> @pr43230(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index bd1a48b..7b0f1c9 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -2555,7 +2555,7 @@ entry:
define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
; SSE2-LABEL: splatshuf_zext_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -2563,7 +2563,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
;
; SSSE3-LABEL: splatshuf_zext_v4i64:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
@@ -2571,7 +2571,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) {
;
; SSE41-LABEL: splatshuf_zext_v4i64:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 19a31a6..31ed745 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 16(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
@@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 32(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -4610,7 +4610,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: paddb 48(%rsi), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -6544,7 +6544,7 @@ define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa 16(%rdx), %xmm1
; SSE2-NEXT: paddb %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 239472c..5b4cdd2 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: paddb (%rsi), %xmm1
; SSE2-NEXT: movdqa %xmm1, (%rdx)
@@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
@@ -3660,7 +3660,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
; SSE2-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pxor %xmm0, %xmm0
@@ -5250,7 +5250,7 @@ define void @vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2(ptr %
define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.elt.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,1,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa 16(%rsi), %xmm1