aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll64
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir5
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll110
-rw-r--r--llvm/test/CodeGen/AArch64/abds-neg.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/abds.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/abdu-neg.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/abdu.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/adc.ll49
-rw-r--r--llvm/test/CodeGen/AArch64/addcarry-crash.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vabs.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/cmp-to-cmn.ll437
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll55
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll52
-rw-r--r--llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll91
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll77
-rw-r--r--llvm/test/CodeGen/AArch64/midpoint-int.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/neon-dotreduce.ll4360
-rw-r--r--llvm/test/CodeGen/AArch64/neon-extmul.ll108
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-and-tst.ll275
-rw-r--r--llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir23
-rw-r--r--llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir161
-rw-r--r--llvm/test/CodeGen/AArch64/selectopt-const.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/store-float-conversion.ll131
-rw-r--r--llvm/test/CodeGen/AArch64/sve-vector-interleave.ll103
-rw-r--r--llvm/test/CodeGen/AArch64/sve-vscale-combine.ll105
-rw-r--r--llvm/test/CodeGen/AArch64/tbl-loops.ll41
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-add.ll337
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir1254
-rw-r--r--llvm/test/CodeGen/AMDGPU/add-max.ll62
-rw-r--r--llvm/test/CodeGen/AMDGPU/bug-undef-spilled-agpr.mir97
-rw-r--r--llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/fdiv.f16.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll1783
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.ll633
-rw-r--r--llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll98
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll184
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll539
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll64
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll164
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp10.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log10.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll7
-rw-r--r--llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/memmove-var-size.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/rcp-pattern.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f32.ll90
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f64.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/udivrem24.ll1984
-rw-r--r--llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll29
-rw-r--r--llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/ARM/nop_concat_vectors.ll8
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll12
-rw-r--r--llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll91
-rw-r--r--llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/NVPTX/bug26185-2.ll9
-rw-r--r--llvm/test/CodeGen/NVPTX/combine-wide.ll1339
-rw-r--r--llvm/test/CodeGen/NVPTX/fold-movs.ll38
-rw-r--r--llvm/test/CodeGen/NVPTX/i8x4-instructions.ll168
-rw-r--r--llvm/test/CodeGen/NVPTX/ld-param-sink.ll47
-rw-r--r--llvm/test/CodeGen/NVPTX/local-stack-frame.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/vector-loads.ll11
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-lower-arbitrary-sized-ints.ll61
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/build-vector-tests.ll48
-rw-r--r--llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll6
-rw-r--r--llvm/test/CodeGen/PowerPC/combine-fneg.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/fp-strict-round.ll6
-rw-r--r--llvm/test/CodeGen/PowerPC/frem.ll3
-rw-r--r--llvm/test/CodeGen/PowerPC/froundeven-legalization.ll8
-rw-r--r--llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/ldexp.ll2
-rw-r--r--llvm/test/CodeGen/PowerPC/llvm.modf.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll347
-rw-r--r--llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll5
-rw-r--r--llvm/test/CodeGen/PowerPC/vec_insert_elt.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll176
-rw-r--r--llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll33
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/memset-inline.ll122
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-load.ll72
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/memset-inline.ll126
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll46
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir16
-rw-r--r--llvm/test/CodeGen/RISCV/xmips-cbop.ll55
-rw-r--r--llvm/test/CodeGen/RISCV/zilsd.ll19
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll42
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll408
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/logical-struct-access.ll3
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll46
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll54
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll75
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll104
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll19
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-mul-07.ll114
-rw-r--r--llvm/test/CodeGen/WebAssembly/target-features-cpus.ll4
-rw-r--r--llvm/test/CodeGen/X86/apx/cf.ll35
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-assembly.ll43
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-tailcall.ll34
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section.ll38
-rw-r--r--llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll185
-rw-r--r--llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir6
-rw-r--r--llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir44
-rw-r--r--llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir (renamed from llvm/test/CodeGen/X86/tail-dup-computed-goto.mir)44
-rw-r--r--llvm/test/CodeGen/X86/pr76416.ll79
-rw-r--r--llvm/test/CodeGen/X86/subreg-fail.mir4
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir451
141 files changed, 14536 insertions, 5352 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
index 2779e89..4a85d84 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
@@ -12,7 +12,7 @@ define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -46,13 +46,13 @@ define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) {
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $w1, $x0, $x2
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def $x9, pcsections !0 :: (load (s32) from %ir.pnew)
+ ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def renamable $x9, pcsections !0 :: (load (s32) from %ir.pnew)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.cmpxchg.start:
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0, $x9
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -91,7 +91,7 @@ define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -243,7 +243,7 @@ define i32 @fetch_and_nand(ptr %p) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w8, 2, pcsections !0
; CHECK-NEXT: $w9 = ORNWrs $wzr, killed renamable $w9, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRW killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p)
@@ -295,7 +295,7 @@ define i32 @fetch_and_or(ptr %p) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $w10 = ORRWrs renamable $w8, renamable $w9, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRW killed renamable $w10, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
@@ -726,7 +726,7 @@ define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -750,7 +750,7 @@ define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -773,7 +773,7 @@ define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -797,7 +797,7 @@ define i8 @atomicrmw_and_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -821,7 +821,7 @@ define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -845,7 +845,7 @@ define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -869,7 +869,7 @@ define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0
@@ -895,7 +895,7 @@ define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0
@@ -923,10 +923,10 @@ define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -951,10 +951,10 @@ define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -977,7 +977,7 @@ define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1001,7 +1001,7 @@ define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1024,7 +1024,7 @@ define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1048,7 +1048,7 @@ define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1072,7 +1072,7 @@ define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1096,7 +1096,7 @@ define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1120,7 +1120,7 @@ define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0
@@ -1146,7 +1146,7 @@ define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0
@@ -1174,10 +1174,10 @@ define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1202,10 +1202,10 @@ define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1230,7 +1230,7 @@ define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
@@ -1272,7 +1272,7 @@ define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 8, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
index 09e5a15..a422f60 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
@@ -667,11 +667,10 @@ body: |
; SELECT-NEXT: {{ $}}
; SELECT-NEXT: %zero:gpr64 = COPY $xzr
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
- ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
+ ; SELECT-NEXT: %cmp_lhs:gpr64 = SUBSXrs %zero, %reg0, 63, implicit-def dead $nzcv
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
- ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
- ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
+ ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_lhs, %sext_in_reg, 131, implicit-def $nzcv
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; SELECT-NEXT: $w0 = COPY %cmp
; SELECT-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
index 578038b..d9cdac4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -1,8 +1,8 @@
; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-NoAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"
@@ -38,24 +38,12 @@ if.end: ; preds = %if.then, %entry
; CHECK-NOT: madd
; CHECK:ldr
-; CHECK-NoAA-LABEL: @test_GEP_CSE(
-; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint ptr %string to i64
-; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
-; CHECK-NoAA: add i64 [[PTR2]], 23052
-; CHECK-NoAA: inttoptr
-; CHECK-NoAA: if.then:
-; CHECK-NoAA-NOT: ptrtoint
-; CHECK-NoAA-NOT: mul
-; CHECK-NoAA: add i64 [[PTR2]], 23048
-; CHECK-NoAA: inttoptr
-
-; CHECK-UseAA-LABEL: @test_GEP_CSE(
-; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23052
-; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23048
+; CHECK-IR-LABEL: @test_GEP_CSE(
+; CHECK-IR: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-IR: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23052
+; CHECK-IR: if.then:
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23048
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
%struct.pt = type { ptr, i32, i32 }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
index f7e16b8..9947fba 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
@@ -38,14 +38,12 @@ define <16 x i32> @mul_i32(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v4.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v3.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v3.8h
-; CHECK-GI-NEXT: umull v2.4s, v4.4h, v5.4h
-; CHECK-GI-NEXT: umull2 v3.4s, v4.8h, v5.8h
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v3.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v0.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -75,26 +73,20 @@ define <16 x i64> @mul_i64(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v4.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v2.8h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v16.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v17.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v4.2s, v2.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v4.4s, v2.4s
-; CHECK-GI-NEXT: umull v2.2d, v5.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v5.4s, v3.4s
-; CHECK-GI-NEXT: umull v4.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umull2 v5.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umull v6.2d, v16.2s, v17.2s
-; CHECK-GI-NEXT: umull2 v7.2d, v16.4s, v17.4s
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v5.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v7.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
+; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0
+; CHECK-GI-NEXT: ushll2 v5.2d, v5.4s, #0
+; CHECK-GI-NEXT: ushll v6.2d, v7.2s, #0
+; CHECK-GI-NEXT: ushll2 v7.2d, v7.4s, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
@@ -142,18 +134,12 @@ define <16 x i32> @mla_i32(<16 x i8> %a, <16 x i8> %b, <16 x i32> %c) {
;
; CHECK-GI-LABEL: mla_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v6.4h, v7.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v6.8h, v7.8h
-; CHECK-GI-NEXT: umlal v4.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v5.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v6.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v7.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v6.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v6.8h
+; CHECK-GI-NEXT: uaddw v2.4s, v4.4s, v7.4h
+; CHECK-GI-NEXT: uaddw2 v3.4s, v5.4s, v7.8h
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -186,35 +172,21 @@ define <16 x i64> @mla_i64(<16 x i8> %a, <16 x i8> %b, <16 x i64> %c) {
;
; CHECK-GI-LABEL: mla_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov v16.16b, v2.16b
-; CHECK-GI-NEXT: mov v17.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
-; CHECK-GI-NEXT: mov v4.16b, v6.16b
-; CHECK-GI-NEXT: mov v5.16b, v7.16b
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v18.4s, v6.4h, #0
-; CHECK-GI-NEXT: ushll v20.4s, v7.4h, #0
-; CHECK-GI-NEXT: ushll2 v19.4s, v6.8h, #0
-; CHECK-GI-NEXT: ushll v21.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v22.4s, v7.8h, #0
-; CHECK-GI-NEXT: ushll v23.4s, v1.4h, #0
-; CHECK-GI-NEXT: ldp q6, q7, [sp]
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v16.2d, v18.2s, v20.2s
-; CHECK-GI-NEXT: umlal2 v17.2d, v18.4s, v20.4s
-; CHECK-GI-NEXT: umlal v2.2d, v19.2s, v22.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v19.4s, v22.4s
-; CHECK-GI-NEXT: umlal v4.2d, v21.2s, v23.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v21.4s, v23.4s
-; CHECK-GI-NEXT: umlal v6.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v7.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v16.16b
-; CHECK-GI-NEXT: mov v1.16b, v17.16b
+; CHECK-GI-NEXT: umull v16.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ldp q19, q20, [sp]
+; CHECK-GI-NEXT: ushll v1.4s, v16.4h, #0
+; CHECK-GI-NEXT: ushll2 v16.4s, v16.8h, #0
+; CHECK-GI-NEXT: ushll v17.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v18.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v16.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v16.4s
+; CHECK-GI-NEXT: uaddw v4.2d, v6.2d, v17.2s
+; CHECK-GI-NEXT: uaddw2 v5.2d, v7.2d, v17.4s
+; CHECK-GI-NEXT: uaddw v6.2d, v19.2d, v18.2s
+; CHECK-GI-NEXT: uaddw2 v7.2d, v20.2d, v18.4s
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll
index 432ffc3..7524782 100644
--- a/llvm/test/CodeGen/AArch64/abds-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abds-neg.ll
@@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w9, w1, w8
-; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w8
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i32 %b to i64
@@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w1
-; CHECK-NEXT: sub w9, w8, w0
-; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i16 %b to i64
@@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, gt
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, gt
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -359,9 +347,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w8, w9, ge
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%cmp = icmp sge i32 %a, %b
%ab = sub i32 %a, %b
@@ -373,9 +360,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, lt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%cmp = icmp slt i64 %a, %b
%ab = sub i64 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index ed1e607..bbdb116 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w9, w1, w8
; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w0, w8, w9, gt
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i32 %b to i64
@@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w1
-; CHECK-NEXT: sub w9, w8, w0
; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w0, w8, w9, gt
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i16 %b to i64
@@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -248,9 +242,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_minmax_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
%max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
@@ -261,9 +254,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_minmax_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
%max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
@@ -324,9 +316,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%cmp = icmp slt i32 %a, %b
%ab = sub i32 %a, %b
@@ -338,9 +329,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = sub i64 %a, %b
@@ -572,9 +562,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_select_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%cmp = icmp sgt i32 %a, %b
%ab = select i1 %cmp, i32 %a, i32 %b
@@ -586,9 +575,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_select_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = select i1 %cmp, i64 %a, i64 %b
diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll
index 8fb106e..d07f099a 100644
--- a/llvm/test/CodeGen/AArch64/abdu-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll
@@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w9, w1, w8
-; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w8, w8, w9, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w8
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
%bext = zext i32 %b to i64
@@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w1, #0xffff
-; CHECK-NEXT: sub w9, w8, w0
-; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w8, w8, w9, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i16 %b to i64
@@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, hi
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, hi
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -363,9 +351,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w8, w9, hs
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%cmp = icmp uge i32 %a, %b
%ab = sub i32 %a, %b
@@ -377,9 +364,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, lo
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%cmp = icmp ult i64 %a, %b
%ab = sub i64 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 4585de9..1045ee2 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w9, w1, w8
; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w0, w8, w9, hi
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
%bext = zext i32 %b to i64
@@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w1, #0xffff
-; CHECK-NEXT: sub w9, w8, w0
; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w0, w8, w9, hi
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i16 %b to i64
@@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -252,9 +246,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_minmax_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
%max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
@@ -265,9 +258,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_minmax_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
%max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
@@ -330,9 +322,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%cmp = icmp ult i32 %a, %b
%ab = sub i32 %a, %b
@@ -344,9 +335,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%cmp = icmp uge i64 %a, %b
%ab = sub i64 %a, %b
@@ -437,9 +427,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_select_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%cmp = icmp ugt i32 %a, %b
%ab = select i1 %cmp, i32 %a, i32 %b
@@ -451,9 +440,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_select_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%cmp = icmp uge i64 %a, %b
%ab = select i1 %cmp, i64 %a, i64 %b
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index 4b1393f..12e8bf2 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefixes=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefixes=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck --check-prefixes=CHECK-GI %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-LE-LABEL: test_simple:
@@ -18,11 +19,16 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-BE-NEXT: subs x1, x8, x5
; CHECK-BE-NEXT: sbc x0, x9, x4
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_simple:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x8, x0, x2
+; CHECK-GI-NEXT: adc x9, x1, x3
+; CHECK-GI-NEXT: subs x0, x8, x4
+; CHECK-GI-NEXT: sbc x1, x9, x5
+; CHECK-GI-NEXT: ret
%valadd = add i128 %a, %b
-
%valsub = sub i128 %valadd, %c
-
ret i128 %valsub
}
@@ -38,9 +44,13 @@ define i128 @test_imm(i128 %a) {
; CHECK-BE-NEXT: adds x1, x1, #12
; CHECK-BE-NEXT: cinc x0, x0, hs
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_imm:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x0, x0, #12
+; CHECK-GI-NEXT: adc x1, x1, xzr
+; CHECK-GI-NEXT: ret
%val = add i128 %a, 12
-
ret i128 %val
}
@@ -58,11 +68,16 @@ define i128 @test_shifted(i128 %a, i128 %b) {
; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_shifted:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: lsr x8, x2, #19
+; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45
+; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%rhs = shl i128 %b, 45
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
@@ -86,11 +101,19 @@ define i128 @test_extended(i128 %a, i16 %b) {
; CHECK-BE-NEXT: extr x8, x9, x8, #61
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_extended:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: ; kill: def $w2 killed $w2 def $x2
+; CHECK-GI-NEXT: sxth x8, w2
+; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3
+; CHECK-GI-NEXT: asr x9, x8, #63
+; CHECK-GI-NEXT: lsr x8, x8, #61
+; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%ext = sext i16 %b to i128
%rhs = shl i128 %ext, 3
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index be75ab1..b4556c7 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -1,16 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
target triple = "arm64-apple-ios7.0"
define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
-; CHECK-LABEL: foo:
-; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: lsr x8, x1, #32
-; CHECK-NEXT: ldr w9, [x0, #4]
-; CHECK-NEXT: cmn x3, x2
-; CHECK-NEXT: umull x8, w9, w8
-; CHECK-NEXT: cinc x0, x8, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: lsr x8, x1, #32
+; CHECK-SD-NEXT: ldr w9, [x0, #4]
+; CHECK-SD-NEXT: cmn x3, x2
+; CHECK-SD-NEXT: umull x8, w9, w8
+; CHECK-SD-NEXT: cinc x0, x8, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: lsr x9, x1, #32
+; CHECK-GI-NEXT: cmn x3, x2
+; CHECK-GI-NEXT: cset w10, hs
+; CHECK-GI-NEXT: lsr x8, x8, #32
+; CHECK-GI-NEXT: and x10, x10, #0x1
+; CHECK-GI-NEXT: umaddl x0, w8, w9, x10
+; CHECK-GI-NEXT: ret
entry:
%0 = lshr i64 %a, 32
%1 = load i64, ptr %ptr, align 8
@@ -24,3 +37,6 @@ entry:
}
attributes #0 = { norecurse nounwind readonly }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index b325851..78881c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 634d1b9..5f5b27a 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -59,37 +59,33 @@ bb27: ; preds = %bb9, %bb8
define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-LABEL: avoid_promotion_2_and:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: add x8, x0, #32
-; CHECK-NEXT: b LBB1_2
-; CHECK-NEXT: LBB1_1: ; %latch
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: cmp w9, #2
-; CHECK-NEXT: add x8, x8, #56
-; CHECK-NEXT: b.ls LBB1_4
-; CHECK-NEXT: LBB1_2: ; %loop
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, #32
+; CHECK-NEXT: LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr w9, [x8, #20]
-; CHECK-NEXT: cmp w9, #3
-; CHECK-NEXT: b.lo LBB1_1
-; CHECK-NEXT: ; %bb.3: ; %then
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: ldp w13, w12, [x8, #12]
-; CHECK-NEXT: ldr w10, [x8]
+; CHECK-NEXT: ldr w10, [x9, #20]
+; CHECK-NEXT: cmp w10, #3
+; CHECK-NEXT: b.lo LBB1_3
+; CHECK-NEXT: ; %bb.2: ; %then
+; CHECK-NEXT: ; in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT: ldp w13, w12, [x9, #12]
+; CHECK-NEXT: ldr w10, [x9]
; CHECK-NEXT: ldr x11, [x0]
-; CHECK-NEXT: ldr w14, [x8, #8]
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: ldr w14, [x9, #8]
; CHECK-NEXT: lsl w10, w10, w13
; CHECK-NEXT: ldrb w11, [x11, x12]
; CHECK-NEXT: eor w10, w10, w11
-; CHECK-NEXT: ldur w11, [x8, #-24]
+; CHECK-NEXT: ldur w11, [x9, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x14, x13, [x8, #-16]
-; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: ldp x14, x13, [x9, #-16]
+; CHECK-NEXT: str w10, [x9], #56
; CHECK-NEXT: and w11, w11, w12
; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
-; CHECK-NEXT: LBB1_4: ; %exit
+; CHECK-NEXT: LBB1_3: ; %exit.critedge
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..c4c54175
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..b47607e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..94b657c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index 5765e0a..b3ce9d2 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -1,14 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64"
define i1 @test_EQ_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp eq i64 %add, %a
@@ -16,11 +23,19 @@ entry:
}
define i1 @test_EQ_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -55,11 +70,19 @@ entry:
}
define i1 @test_EQ_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -68,11 +91,17 @@ entry:
}
define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp eq i32 %add, %a
@@ -218,11 +247,17 @@ entry:
}
define i1 @test_NE_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp ne i64 %add, %a
@@ -230,11 +265,19 @@ entry:
}
define i1 @test_NE_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -269,11 +312,19 @@ entry:
}
define i1 @test_NE_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -282,11 +333,17 @@ entry:
}
define i1 @test_NE_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp ne i32 %add, %a
@@ -444,161 +501,281 @@ define i1 @cmn_large_imm(i32 %a) {
}
define i1 @almost_immediate_neg_slt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_slt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i64 %x, -16773121
ret i1 %cmp
}
@@ -637,6 +814,24 @@ define i1 @cmn_nsw_neg(i32 %a, i32 %b) {
ret i1 %cmp
}
+define i1 @cmn_swap(i32 %a, i32 %b) {
+; CHECK-SD-LABEL: cmn_swap:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmn_swap:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
+ %sub = sub nsw i32 0, %b
+ %cmp = icmp sgt i32 %sub, %a
+ ret i1 %cmp
+}
+
+
define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) {
; CHECK-LABEL: cmn_nsw_neg_64:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 880bd29..d67aa08 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -14,20 +14,19 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
+; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: zip1 p1.d, p1.d, p1.d
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x1, #1, mul vl]
@@ -39,14 +38,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x11, x8
; CHECK-NEXT: add x11, x11, x9
; CHECK-NEXT: b.mi .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -111,21 +110,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB1_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p0/z, [x2, x8, lsl #2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
; CHECK-NEXT: cmp x10, x8
@@ -141,12 +139,12 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: b.ne .LBB1_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -213,21 +211,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_x2_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: cnth x11
; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cnth x11
; CHECK-NEXT: mov x12, x9
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p1/z, [x2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x2, x2, x11
; CHECK-NEXT: and z2.d, z2.d, #0xffffffff
; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
@@ -243,14 +240,14 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x12, x8
; CHECK-NEXT: add x12, x12, x9
; CHECK-NEXT: b.mi .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 29be231..0646ca4 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -14,15 +14,14 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x8
-; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z2, [x0, #1, mul vl]
@@ -32,14 +31,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z5, [x1]
; CHECK-NEXT: add x1, x1, x10
; CHECK-NEXT: add x0, x0, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: b.ne .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -183,17 +182,16 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64_unrolled:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntw x8
-; CHECK-NEXT: mov w10, #1000 // =0x3e8
+; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #1000 // =0x3e8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #4
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
-; CHECK-NEXT: mov z2.d, z1.d
-; CHECK-NEXT: mov z3.d, z0.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z4, [x0, #1, mul vl]
@@ -207,20 +205,20 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z18, [x1, #3, mul vl]
; CHECK-NEXT: ldr z19, [x1, #2, mul vl]
; CHECK-NEXT: add x1, x1, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #0
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #0
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #90
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #90
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #90
; CHECK-NEXT: b.ne .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z4.d, z2.d, z3.d
-; CHECK-NEXT: uzp1 z5.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z5.d, z0.d, z1.d
; CHECK-NEXT: uzp2 z2.d, z2.d, z3.d
-; CHECK-NEXT: uzp2 z0.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: fadd z1.d, z4.d, z5.d
; CHECK-NEXT: fadd z2.d, z2.d, z0.d
; CHECK-NEXT: faddv d0, p0, z1.d
@@ -310,15 +308,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-LABEL: reduction_mix:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
+; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z2.d, z2.d
-; CHECK-NEXT: zip1 z1.d, z2.d, z2.d
; CHECK-NEXT: .LBB3_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z3, [x0]
@@ -327,13 +325,13 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-NEXT: ld1w { z5.d }, p0/z, [x3, x8, lsl #2]
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmp x10, x8
-; CHECK-NEXT: fadd z0.d, z4.d, z0.d
-; CHECK-NEXT: fadd z1.d, z3.d, z1.d
+; CHECK-NEXT: fadd z1.d, z4.d, z1.d
+; CHECK-NEXT: fadd z0.d, z3.d, z0.d
; CHECK-NEXT: add z2.d, z5.d, z2.d
; CHECK-NEXT: b.ne .LBB3_1
; CHECK-NEXT: // %bb.2: // %middle.block
-; CHECK-NEXT: uzp2 z3.d, z1.d, z0.d
-; CHECK-NEXT: uzp1 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z3.d, z0.d, z1.d
+; CHECK-NEXT: uzp1 z1.d, z0.d, z1.d
; CHECK-NEXT: uaddv d2, p0, z2.d
; CHECK-NEXT: faddv d0, p0, z3.d
; CHECK-NEXT: faddv d1, p0, z1.d
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index a9618fd..05ecc9e 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -131,18 +131,83 @@ define <4 x i64> @interleave2_v4i64(<2 x i64> %vec0, <2 x i64> %vec1) {
ret <4 x i64> %retval
}
+define <4 x i16> @interleave2_same_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.4h, #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI11_0
+; CHECK-SD-NEXT: ldr d0, [x8, :lo12:.LCPI11_0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: mov w9, #4 // =0x4
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 4))
+ ret <4 x i16> %retval
+}
-; Float declarations
-declare <4 x half> @llvm.vector.interleave2.v4f16(<2 x half>, <2 x half>)
-declare <8 x half> @llvm.vector.interleave2.v8f16(<4 x half>, <4 x half>)
-declare <16 x half> @llvm.vector.interleave2.v16f16(<8 x half>, <8 x half>)
-declare <4 x float> @llvm.vector.interleave2.v4f32(<2 x float>, <2 x float>)
-declare <8 x float> @llvm.vector.interleave2.v8f32(<4 x float>, <4 x float>)
-declare <4 x double> @llvm.vector.interleave2.v4f64(<2 x double>, <2 x double>)
-
-; Integer declarations
-declare <32 x i8> @llvm.vector.interleave2.v32i8(<16 x i8>, <16 x i8>)
-declare <16 x i16> @llvm.vector.interleave2.v16i16(<8 x i16>, <8 x i16>)
-declare <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32>, <4 x i32>)
-declare <4 x i64> @llvm.vector.interleave2.v4i64(<2 x i64>, <2 x i64>)
+define <4 x i16> @interleave2_same_nonconst_splat_v4i16(i16 %a) {
+; CHECK-SD-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.4h, w0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat, <2 x i16> %splat)
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_nonconst_splat_v4i16(i16 %a, i16 %b) {
+; CHECK-SD-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w0
+; CHECK-SD-NEXT: mov v0.h[2], w1
+; CHECK-SD-NEXT: mov v0.h[3], w1
+; CHECK-SD-NEXT: rev32 v1.4h, v0.4h
+; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: dup v1.4h, w1
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %ins1 = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <2 x i16> %ins1, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %ins2 = insertelement <2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <2 x i16> %ins2, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat1, <2 x i16> %splat2)
+ ret <4 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
index 0f208f8..374def5 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=false < %s | sed -e "/; kill: /d" | FileCheck %s
+; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=true < %s | FileCheck %s
; Check there's no assert in spilling from implicit-def operands on an
; IMPLICIT_DEF.
@@ -92,7 +93,6 @@ define void @widget(i32 %arg, i32 %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i32 %a
; CHECK-NEXT: ldr x8, [sp, #40] ; 8-byte Folded Reload
; CHECK-NEXT: mov x0, xzr
; CHECK-NEXT: mov x1, xzr
-; CHECK-NEXT: ; kill: def $w8 killed $w8 killed $x8 def $x8
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl _fprintf
; CHECK-NEXT: brk #0x1
diff --git a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
index c4a027c..381904f 100644
--- a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
+++ b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
@@ -25,77 +25,58 @@ define void @test_interp(ptr %frame, ptr %dst) {
; CHECK-NEXT: adrp x21, _opcode.targets@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov x24, xzr
; CHECK-NEXT: add x8, x21, xzr, lsl #3
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: add x23, x22, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: mov w22, #1 ; =0x1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp0: ; Block address taken
; CHECK-NEXT: LBB0_1: ; %loop.header
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: mov x22, xzr
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp1: ; Block address taken
; CHECK-NEXT: LBB0_2: ; %op1.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: str xzr, [x19]
-; CHECK-NEXT: mov w8, #1 ; =0x1
+; CHECK-NEXT: Ltmp2: ; Block address taken
+; CHECK-NEXT: LBB0_3: ; %op6.bb
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
+; CHECK-NEXT: ldr x8, [x0, #8]
+; CHECK-NEXT: str x22, [x0]
+; CHECK-NEXT: ldr x8, [x8, #48]
; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x8, x21, x24, lsl #3
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp2: ; Block address taken
-; CHECK-NEXT: LBB0_3: ; %op2.bb
+; CHECK-NEXT: Ltmp3: ; Block address taken
+; CHECK-NEXT: LBB0_4: ; %op2.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp3: ; Block address taken
-; CHECK-NEXT: LBB0_4: ; %op4.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
-; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
-; CHECK-NEXT: stp x8, x9, [x20, #-8]
-; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
; CHECK-NEXT: Ltmp4: ; Block address taken
-; CHECK-NEXT: LBB0_5: ; %op5.bb
+; CHECK-NEXT: LBB0_5: ; %op4.bb
+; CHECK-NEXT: Ltmp5: ; Block address taken
+; CHECK-NEXT: LBB0_6: ; %op5.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: ldur x8, [x23, #12]
; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
+; CHECK-NEXT: add x23, x23, #20
; CHECK-NEXT: stp x8, x9, [x20, #-8]
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
-; CHECK-NEXT: Ltmp5: ; Block address taken
-; CHECK-NEXT: LBB0_6: ; %op6.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: mov w8, #1 ; =0x1
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
-; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll
index bbdce7c..15c1dff 100644
--- a/llvm/test/CodeGen/AArch64/midpoint-int.ll
+++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll
@@ -13,10 +13,9 @@
define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: scalar_i32_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w9, w1, w0
-; CHECK-NEXT: subs w10, w0, w1
+; CHECK-NEXT: subs w9, w0, w1
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: csel w9, w10, w9, gt
+; CHECK-NEXT: cneg w9, w9, le
; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
@@ -35,10 +34,9 @@ define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: scalar_i32_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w9, w1, w0
-; CHECK-NEXT: subs w10, w0, w1
+; CHECK-NEXT: subs w9, w0, w1
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: csel w9, w10, w9, hi
+; CHECK-NEXT: cneg w9, w9, ls
; CHECK-NEXT: cneg w8, w8, ls
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
@@ -61,11 +59,9 @@ define i32 @scalar_i32_signed_mem_reg(ptr %a1_addr, i32 %a2) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w9, w1
-; CHECK-NEXT: sub w10, w1, w9
+; CHECK-NEXT: subs w10, w9, w1
+; CHECK-NEXT: cneg w10, w10, le
; CHECK-NEXT: cneg w8, w8, le
-; CHECK-NEXT: subs w11, w9, w1
-; CHECK-NEXT: csel w10, w11, w10, gt
; CHECK-NEXT: lsr w10, w10, #1
; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
@@ -86,11 +82,9 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, ptr %a2_addr) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x1]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: sub w10, w9, w0
-; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: subs w9, w0, w9
-; CHECK-NEXT: csel w9, w9, w10, gt
+; CHECK-NEXT: cneg w9, w9, le
+; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -112,11 +106,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: ldr w10, [x1]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w9, w10
-; CHECK-NEXT: sub w11, w10, w9
-; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: subs w10, w9, w10
-; CHECK-NEXT: csel w10, w10, w11, gt
+; CHECK-NEXT: cneg w10, w10, le
+; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w10, w10, #1
; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
@@ -142,10 +134,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
; CHECK-LABEL: scalar_i64_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x9, x1, x0
-; CHECK-NEXT: subs x10, x0, x1
+; CHECK-NEXT: subs x9, x0, x1
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: csel x9, x10, x9, gt
+; CHECK-NEXT: cneg x9, x9, le
; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
@@ -164,10 +155,9 @@ define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
; CHECK-LABEL: scalar_i64_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x9, x1, x0
-; CHECK-NEXT: subs x10, x0, x1
+; CHECK-NEXT: subs x9, x0, x1
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: csel x9, x10, x9, hi
+; CHECK-NEXT: cneg x9, x9, ls
; CHECK-NEXT: cneg x8, x8, ls
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
@@ -190,11 +180,9 @@ define i64 @scalar_i64_signed_mem_reg(ptr %a1_addr, i64 %a2) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x9, x1
-; CHECK-NEXT: sub x10, x1, x9
+; CHECK-NEXT: subs x10, x9, x1
+; CHECK-NEXT: cneg x10, x10, le
; CHECK-NEXT: cneg x8, x8, le
-; CHECK-NEXT: subs x11, x9, x1
-; CHECK-NEXT: csel x10, x11, x10, gt
; CHECK-NEXT: lsr x10, x10, #1
; CHECK-NEXT: madd x0, x10, x8, x9
; CHECK-NEXT: ret
@@ -215,11 +203,9 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, ptr %a2_addr) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x1]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x0, x9
-; CHECK-NEXT: sub x10, x9, x0
-; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: subs x9, x0, x9
-; CHECK-NEXT: csel x9, x9, x10, gt
+; CHECK-NEXT: cneg x9, x9, le
+; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
; CHECK-NEXT: ret
@@ -241,11 +227,9 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: ldr x10, [x1]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x9, x10
-; CHECK-NEXT: sub x11, x10, x9
-; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: subs x10, x9, x10
-; CHECK-NEXT: csel x10, x10, x11, gt
+; CHECK-NEXT: cneg x10, x10, le
+; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x10, x10, #1
; CHECK-NEXT: madd x0, x10, x8, x9
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 4f0c408..048e988 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -28,46 +28,28 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_udot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: uxtb w11, w11
-; CHECK-GI-NEXT: uxtb w13, w13
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: uaddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -128,46 +110,28 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_sdot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: sshll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: sshll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: sshll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -205,22 +169,18 @@ define i32 @test_sdot_v4i8_double(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8
;
; CHECK-GI-LABEL: test_sdot_v4i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: shl v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: shl v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: shl v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: shl v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: sshr v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: mul v1.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
-; CHECK-GI-NEXT: addv s1, v1.4s
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: shl v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: shl v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: shl v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: sshr v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mul v1.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
+; CHECK-GI-NEXT: saddlv s1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -414,31 +374,60 @@ define i32 @test_udot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: umov w8, v1.b[4]
-; CHECK-GI-NEXT: umov w9, v0.b[4]
-; CHECK-GI-NEXT: umov w10, v1.b[0]
-; CHECK-GI-NEXT: umov w12, v0.b[0]
-; CHECK-GI-NEXT: umov w11, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: umov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: umov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: umov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: umov w8, v0.h[0]
+; CHECK-GI-NEXT: umov w9, v0.h[4]
+; CHECK-GI-NEXT: umov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: umov w8, v0.h[2]
+; CHECK-GI-NEXT: umov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -511,31 +500,60 @@ define i32 @test_sdot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: smov w8, v1.b[4]
-; CHECK-GI-NEXT: smov w9, v0.b[4]
-; CHECK-GI-NEXT: smov w10, v1.b[0]
-; CHECK-GI-NEXT: smov w12, v0.b[0]
-; CHECK-GI-NEXT: smov w11, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: smov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: smov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: smov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -571,59 +589,117 @@ define i32 @test_sdot_v5i8_double(<5 x i8> %a, <5 x i8> %b, <5 x i8> %c, <5 x i8
; CHECK-GI-LABEL: test_sdot_v5i8_double:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov b17, v0.b[1]
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: smov w9, v1.b[0]
-; CHECK-GI-NEXT: smov w10, v0.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[4]
-; CHECK-GI-NEXT: smov w12, v2.b[0]
-; CHECK-GI-NEXT: smov w13, v2.b[4]
-; CHECK-GI-NEXT: smov w14, v3.b[4]
-; CHECK-GI-NEXT: smov w8, v0.b[0]
-; CHECK-GI-NEXT: smov w16, v3.b[0]
-; CHECK-GI-NEXT: smov w15, v0.b[1]
-; CHECK-GI-NEXT: fmov s5, w9
-; CHECK-GI-NEXT: mul w9, w10, w11
-; CHECK-GI-NEXT: smov w10, v1.b[1]
-; CHECK-GI-NEXT: fmov s6, w12
-; CHECK-GI-NEXT: mul w12, w13, w14
-; CHECK-GI-NEXT: smov w11, v2.b[1]
-; CHECK-GI-NEXT: smov w13, v3.b[1]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: fmov s7, w16
-; CHECK-GI-NEXT: fmov s16, w9
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: smov w14, v1.b[2]
-; CHECK-GI-NEXT: fmov s17, w12
-; CHECK-GI-NEXT: smov w9, v3.b[2]
-; CHECK-GI-NEXT: mov v5.s[1], w10
-; CHECK-GI-NEXT: mov v4.s[1], w15
-; CHECK-GI-NEXT: smov w15, v2.b[2]
-; CHECK-GI-NEXT: mov v6.s[1], w11
-; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w13
-; CHECK-GI-NEXT: smov w10, v0.b[3]
-; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: smov w11, v1.b[3]
-; CHECK-GI-NEXT: smov w12, v2.b[3]
-; CHECK-GI-NEXT: smov w13, v3.b[3]
-; CHECK-GI-NEXT: mov v5.s[2], w14
-; CHECK-GI-NEXT: mov v4.s[2], w8
-; CHECK-GI-NEXT: mov v6.s[2], w15
-; CHECK-GI-NEXT: mov v16.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], w9
-; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], w10
-; CHECK-GI-NEXT: mov v6.s[3], w12
-; CHECK-GI-NEXT: mov v16.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], w13
-; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: mla v16.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mla v17.4s, v6.4s, v7.4s
-; CHECK-GI-NEXT: addv s0, v16.4s
-; CHECK-GI-NEXT: addv s1, v17.4s
+; CHECK-GI-NEXT: fmov w11, s1
+; CHECK-GI-NEXT: mov b25, v1.b[1]
+; CHECK-GI-NEXT: mov b16, v1.b[2]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b5, v1.b[4]
+; CHECK-GI-NEXT: mov b22, v2.b[1]
+; CHECK-GI-NEXT: mov b23, v3.b[1]
+; CHECK-GI-NEXT: sxtb w9, w8
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b24, v0.b[2]
+; CHECK-GI-NEXT: fmov w8, s17
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b4, v0.b[4]
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b18, v2.b[2]
+; CHECK-GI-NEXT: mov b19, v2.b[3]
+; CHECK-GI-NEXT: mov b0, v2.b[4]
+; CHECK-GI-NEXT: fmov w9, s25
+; CHECK-GI-NEXT: fmov w12, s22
+; CHECK-GI-NEXT: sxtb w10, w8
+; CHECK-GI-NEXT: mov b21, v3.b[2]
+; CHECK-GI-NEXT: fmov w13, s23
+; CHECK-GI-NEXT: mov b20, v3.b[3]
+; CHECK-GI-NEXT: mov b17, v3.b[4]
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: mov v1.h[1], w10
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov s2, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v1.h[2], w8
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov s3, w10
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s22, w11
+; CHECK-GI-NEXT: fmov w11, s21
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w12
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: mov v22.h[1], w13
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: fmov w12, s6
+; CHECK-GI-NEXT: mov v3.h[2], w10
+; CHECK-GI-NEXT: fmov w10, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: fmov w11, s4
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v1.h[3], w12
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v1.h[4], w11
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mov v22.h[4], w10
+; CHECK-GI-NEXT: mul v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: mul v1.8h, v3.8h, v22.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w11, v0.h[1]
+; CHECK-GI-NEXT: smov w10, v1.h[0]
+; CHECK-GI-NEXT: smov w12, v1.h[4]
+; CHECK-GI-NEXT: smov w13, v1.h[1]
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v1.h[2]
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: fmov s5, w12
+; CHECK-GI-NEXT: mov v2.s[1], w11
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: smov w10, v0.h[3]
+; CHECK-GI-NEXT: smov w11, v1.h[3]
+; CHECK-GI-NEXT: mov v4.s[1], w13
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[2], w8
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v4.s[2], w9
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w10
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v4.s[3], w11
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -2303,11 +2379,14 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2316,132 +2395,282 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: umov w9, v1.b[4]
-; CHECK-GI-NEXT: umov w11, v1.b[5]
-; CHECK-GI-NEXT: umov w18, v1.b[0]
-; CHECK-GI-NEXT: umov w0, v1.b[12]
-; CHECK-GI-NEXT: umov w3, v7.b[4]
-; CHECK-GI-NEXT: umov w12, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v1.b[6]
-; CHECK-GI-NEXT: umov w1, v1.b[13]
-; CHECK-GI-NEXT: umov w4, v7.b[5]
-; CHECK-GI-NEXT: umov w15, v1.b[2]
-; CHECK-GI-NEXT: umov w8, v1.b[3]
-; CHECK-GI-NEXT: umov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: umov w14, v1.b[8]
-; CHECK-GI-NEXT: umov w17, v1.b[9]
-; CHECK-GI-NEXT: umov w10, v1.b[10]
-; CHECK-GI-NEXT: umov w9, v1.b[11]
-; CHECK-GI-NEXT: umov w5, v1.b[14]
-; CHECK-GI-NEXT: umov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: umov w7, v7.b[1]
-; CHECK-GI-NEXT: umov w18, v7.b[6]
-; CHECK-GI-NEXT: umov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: umov w12, v7.b[3]
-; CHECK-GI-NEXT: umov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: umov w13, v7.b[2]
-; CHECK-GI-NEXT: umov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: umov w23, v16.b[12]
-; CHECK-GI-NEXT: umov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: umov w6, v16.b[0]
-; CHECK-GI-NEXT: umov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: umov w16, v16.b[2]
-; CHECK-GI-NEXT: umov w15, v16.b[3]
-; CHECK-GI-NEXT: umov w22, v16.b[5]
-; CHECK-GI-NEXT: umov w5, v16.b[6]
-; CHECK-GI-NEXT: umov w18, v16.b[7]
-; CHECK-GI-NEXT: umov w19, v16.b[8]
-; CHECK-GI-NEXT: umov w7, v16.b[9]
-; CHECK-GI-NEXT: umov w24, v16.b[13]
-; CHECK-GI-NEXT: umov w1, v16.b[10]
-; CHECK-GI-NEXT: umov w17, v16.b[11]
-; CHECK-GI-NEXT: umov w20, v16.b[14]
-; CHECK-GI-NEXT: umov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: umov w21, v3.b[8]
-; CHECK-GI-NEXT: umov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: umov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: umov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: umov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: umov w24, v3.b[2]
-; CHECK-GI-NEXT: umov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: umov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b4, v2.b[1]
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[10]
+; CHECK-GI-NEXT: mov b22, v2.b[11]
+; CHECK-GI-NEXT: fmov w7, s2
+; CHECK-GI-NEXT: fmov w13, s6
+; CHECK-GI-NEXT: mov b6, v2.b[12]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: mov b7, v2.b[13]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: mov b16, v2.b[14]
+; CHECK-GI-NEXT: mov b23, v2.b[15]
+; CHECK-GI-NEXT: ldp q3, q2, [x0]
+; CHECK-GI-NEXT: fmov w26, s19
+; CHECK-GI-NEXT: fmov w19, s4
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov b4, v3.b[2]
+; CHECK-GI-NEXT: mov b5, v3.b[1]
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v1.b[1]
+; CHECK-GI-NEXT: fmov w22, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov b18, v1.b[2]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: uxtb w24, w24
+; CHECK-GI-NEXT: mov b19, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s6
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: mov b21, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s7
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b7, v1.b[5]
+; CHECK-GI-NEXT: mov b6, v3.b[3]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w12, s23
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: mov b23, v1.b[7]
+; CHECK-GI-NEXT: mov b20, v3.b[4]
+; CHECK-GI-NEXT: fmov w28, s4
+; CHECK-GI-NEXT: fmov s4, w26
+; CHECK-GI-NEXT: fmov w14, s16
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: fmov w5, s18
+; CHECK-GI-NEXT: uxtb w12, w12
+; CHECK-GI-NEXT: fmov w4, s19
+; CHECK-GI-NEXT: mov b19, v3.b[5]
+; CHECK-GI-NEXT: uxtb w28, w28
+; CHECK-GI-NEXT: fmov w3, s21
+; CHECK-GI-NEXT: mov b18, v3.b[6]
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w1, s7
+; CHECK-GI-NEXT: mov b16, v3.b[7]
+; CHECK-GI-NEXT: fmov w0, s22
+; CHECK-GI-NEXT: mov b17, v3.b[8]
+; CHECK-GI-NEXT: fmov w17, s23
+; CHECK-GI-NEXT: mov b7, v3.b[9]
+; CHECK-GI-NEXT: fmov w30, s5
+; CHECK-GI-NEXT: mov b5, v3.b[10]
+; CHECK-GI-NEXT: mov b21, v3.b[11]
+; CHECK-GI-NEXT: fmov w25, s6
+; CHECK-GI-NEXT: mov b6, v3.b[12]
+; CHECK-GI-NEXT: fmov w23, s20
+; CHECK-GI-NEXT: mov b20, v3.b[13]
+; CHECK-GI-NEXT: mov b22, v3.b[14]
+; CHECK-GI-NEXT: fmov w6, s3
+; CHECK-GI-NEXT: mov b23, v3.b[15]
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w21, s19
+; CHECK-GI-NEXT: mov b19, v2.b[1]
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: fmov w24, s6
+; CHECK-GI-NEXT: fmov w7, s16
+; CHECK-GI-NEXT: mov b16, v2.b[2]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w19
+; CHECK-GI-NEXT: uxtb w19, w29
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: uxtb w22, w6
+; CHECK-GI-NEXT: fmov s5, w8
+; CHECK-GI-NEXT: fmov w10, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: fmov w20, s18
+; CHECK-GI-NEXT: uxtb w29, w29
+; CHECK-GI-NEXT: fmov s6, w22
+; CHECK-GI-NEXT: fmov w22, s2
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v5.h[1], w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v2.b[3]
+; CHECK-GI-NEXT: mov v3.h[2], w19
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov v6.h[1], w27
+; CHECK-GI-NEXT: fmov w27, s19
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: fmov s16, w22
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: uxtb w5, w25
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: mov v3.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w4
+; CHECK-GI-NEXT: mov v6.h[2], w28
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: mov v16.h[1], w27
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov b20, v2.b[5]
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov w22, s17
+; CHECK-GI-NEXT: mov v5.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w2
+; CHECK-GI-NEXT: mov v6.h[3], w5
+; CHECK-GI-NEXT: mov b21, v2.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w18
+; CHECK-GI-NEXT: uxtb w18, w23
+; CHECK-GI-NEXT: mov v3.h[4], w13
+; CHECK-GI-NEXT: uxtb w13, w24
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w3
+; CHECK-GI-NEXT: mov v6.h[4], w18
+; CHECK-GI-NEXT: ldr w18, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w16
+; CHECK-GI-NEXT: uxtb w16, w22
+; CHECK-GI-NEXT: mov v5.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w15
+; CHECK-GI-NEXT: uxtb w18, w18
+; CHECK-GI-NEXT: mov v4.h[4], w9
+; CHECK-GI-NEXT: uxtb w9, w21
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[4], w16
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w1
+; CHECK-GI-NEXT: mov v3.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: fmov w19, s22
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: mov v5.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w0
+; CHECK-GI-NEXT: mov v4.h[5], w10
+; CHECK-GI-NEXT: uxtb w10, w20
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: mov v16.h[5], w11
+; CHECK-GI-NEXT: uxtb w8, w14
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v6.h[6], w10
+; CHECK-GI-NEXT: uxtb w10, w19
+; CHECK-GI-NEXT: fmov w6, s23
+; CHECK-GI-NEXT: mov v5.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: mov v3.h[6], w18
+; CHECK-GI-NEXT: mov v4.h[6], w8
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov v7.h[6], w10
+; CHECK-GI-NEXT: mov v16.h[6], w11
+; CHECK-GI-NEXT: uxtb w10, w6
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v6.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w17
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[7], w13
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[7], w12
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[7], w10
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: umov w8, v1.b[8]
+; CHECK-GI-NEXT: umov w9, v2.b[8]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: mul w15, w8, w9
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: umov w16, v3.h[0]
+; CHECK-GI-NEXT: umov w18, v3.h[4]
+; CHECK-GI-NEXT: umov w17, v3.h[1]
+; CHECK-GI-NEXT: umov w1, v2.h[0]
+; CHECK-GI-NEXT: umov w3, v2.h[4]
+; CHECK-GI-NEXT: umov w0, v3.h[5]
+; CHECK-GI-NEXT: umov w5, v1.h[0]
+; CHECK-GI-NEXT: umov w7, v1.h[4]
+; CHECK-GI-NEXT: umov w2, v2.h[1]
+; CHECK-GI-NEXT: umov w4, v2.h[5]
+; CHECK-GI-NEXT: umov w6, v1.h[1]
+; CHECK-GI-NEXT: umov w19, v1.h[5]
+; CHECK-GI-NEXT: umov w10, v3.h[2]
+; CHECK-GI-NEXT: umov w8, v3.h[3]
+; CHECK-GI-NEXT: umov w11, v3.h[6]
+; CHECK-GI-NEXT: umov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w16
+; CHECK-GI-NEXT: fmov s4, w18
+; CHECK-GI-NEXT: fmov s5, w1
+; CHECK-GI-NEXT: fmov s6, w3
+; CHECK-GI-NEXT: fmov s7, w5
+; CHECK-GI-NEXT: fmov s16, w7
+; CHECK-GI-NEXT: fmov s17, w15
+; CHECK-GI-NEXT: umov w12, v2.h[2]
+; CHECK-GI-NEXT: umov w13, v2.h[6]
+; CHECK-GI-NEXT: umov w14, v1.h[2]
+; CHECK-GI-NEXT: umov w16, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mov v4.s[1], w0
+; CHECK-GI-NEXT: mov v5.s[1], w2
+; CHECK-GI-NEXT: mov v6.s[1], w4
+; CHECK-GI-NEXT: mov v7.s[1], w6
+; CHECK-GI-NEXT: mov v16.s[1], w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: umov w15, v2.h[3]
+; CHECK-GI-NEXT: umov w17, v2.h[7]
+; CHECK-GI-NEXT: umov w18, v1.h[3]
+; CHECK-GI-NEXT: umov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w16
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w15
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2580,11 +2809,14 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2593,132 +2825,283 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: smov w9, v1.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[5]
-; CHECK-GI-NEXT: smov w18, v1.b[0]
-; CHECK-GI-NEXT: smov w0, v1.b[12]
-; CHECK-GI-NEXT: smov w3, v7.b[4]
-; CHECK-GI-NEXT: smov w12, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v1.b[6]
-; CHECK-GI-NEXT: smov w1, v1.b[13]
-; CHECK-GI-NEXT: smov w4, v7.b[5]
-; CHECK-GI-NEXT: smov w15, v1.b[2]
-; CHECK-GI-NEXT: smov w8, v1.b[3]
-; CHECK-GI-NEXT: smov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: smov w14, v1.b[8]
-; CHECK-GI-NEXT: smov w17, v1.b[9]
-; CHECK-GI-NEXT: smov w10, v1.b[10]
-; CHECK-GI-NEXT: smov w9, v1.b[11]
-; CHECK-GI-NEXT: smov w5, v1.b[14]
-; CHECK-GI-NEXT: smov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: smov w7, v7.b[1]
-; CHECK-GI-NEXT: smov w18, v7.b[6]
-; CHECK-GI-NEXT: smov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: smov w12, v7.b[3]
-; CHECK-GI-NEXT: smov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: smov w13, v7.b[2]
-; CHECK-GI-NEXT: smov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: smov w23, v16.b[12]
-; CHECK-GI-NEXT: smov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: smov w6, v16.b[0]
-; CHECK-GI-NEXT: smov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: smov w16, v16.b[2]
-; CHECK-GI-NEXT: smov w15, v16.b[3]
-; CHECK-GI-NEXT: smov w22, v16.b[5]
-; CHECK-GI-NEXT: smov w5, v16.b[6]
-; CHECK-GI-NEXT: smov w18, v16.b[7]
-; CHECK-GI-NEXT: smov w19, v16.b[8]
-; CHECK-GI-NEXT: smov w7, v16.b[9]
-; CHECK-GI-NEXT: smov w24, v16.b[13]
-; CHECK-GI-NEXT: smov w1, v16.b[10]
-; CHECK-GI-NEXT: smov w17, v16.b[11]
-; CHECK-GI-NEXT: smov w20, v16.b[14]
-; CHECK-GI-NEXT: smov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: smov w21, v3.b[8]
-; CHECK-GI-NEXT: smov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: smov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: smov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: smov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: smov w24, v3.b[2]
-; CHECK-GI-NEXT: smov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: smov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[15]
+; CHECK-GI-NEXT: mov b3, v2.b[1]
+; CHECK-GI-NEXT: fmov w19, s2
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: fmov w6, s5
+; CHECK-GI-NEXT: mov b5, v2.b[10]
+; CHECK-GI-NEXT: fmov w14, s6
+; CHECK-GI-NEXT: mov b6, v2.b[11]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: mov b7, v2.b[12]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: mov b16, v2.b[13]
+; CHECK-GI-NEXT: mov b18, v1.b[1]
+; CHECK-GI-NEXT: sxtb w6, w6
+; CHECK-GI-NEXT: mov b17, v2.b[14]
+; CHECK-GI-NEXT: ldp q4, q2, [x0]
+; CHECK-GI-NEXT: fmov w25, s19
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: fmov w22, s5
+; CHECK-GI-NEXT: mov b5, v1.b[2]
+; CHECK-GI-NEXT: fmov w0, s6
+; CHECK-GI-NEXT: sxtb w14, w14
+; CHECK-GI-NEXT: mov b20, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s7
+; CHECK-GI-NEXT: mov b7, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s16
+; CHECK-GI-NEXT: sxtb w25, w25
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov b16, v1.b[5]
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: sxtb w22, w22
+; CHECK-GI-NEXT: mov b6, v4.b[2]
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w0, w0
+; CHECK-GI-NEXT: mov b21, v1.b[7]
+; CHECK-GI-NEXT: mov b18, v4.b[4]
+; CHECK-GI-NEXT: fmov w7, s3
+; CHECK-GI-NEXT: mov b3, v4.b[1]
+; CHECK-GI-NEXT: fmov w12, s17
+; CHECK-GI-NEXT: fmov w5, s5
+; CHECK-GI-NEXT: mov b19, v4.b[3]
+; CHECK-GI-NEXT: fmov w4, s20
+; CHECK-GI-NEXT: fmov w3, s7
+; CHECK-GI-NEXT: sxtb w29, w7
+; CHECK-GI-NEXT: mov b17, v4.b[5]
+; CHECK-GI-NEXT: fmov w1, s16
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov b16, v4.b[6]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: mov b7, v4.b[7]
+; CHECK-GI-NEXT: fmov w17, s21
+; CHECK-GI-NEXT: mov b5, v4.b[8]
+; CHECK-GI-NEXT: mov b20, v4.b[9]
+; CHECK-GI-NEXT: fmov w27, s6
+; CHECK-GI-NEXT: mov b6, v4.b[10]
+; CHECK-GI-NEXT: mov b21, v4.b[11]
+; CHECK-GI-NEXT: fmov w21, s18
+; CHECK-GI-NEXT: mov b18, v4.b[12]
+; CHECK-GI-NEXT: mov b22, v4.b[13]
+; CHECK-GI-NEXT: mov b23, v4.b[14]
+; CHECK-GI-NEXT: fmov w10, s4
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v4.b[15]
+; CHECK-GI-NEXT: fmov s4, w25
+; CHECK-GI-NEXT: fmov w30, s3
+; CHECK-GI-NEXT: fmov s3, w28
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov w7, s7
+; CHECK-GI-NEXT: mov b7, v2.b[1]
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s1
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[1], w29
+; CHECK-GI-NEXT: fmov w29, s6
+; CHECK-GI-NEXT: fmov s6, w10
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w19, s16
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b16, v2.b[3]
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b19, v2.b[2]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov s5, w24
+; CHECK-GI-NEXT: sxtb w24, w30
+; CHECK-GI-NEXT: mov v3.h[2], w6
+; CHECK-GI-NEXT: sxtb w6, w26
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: sxtb w23, w23
+; CHECK-GI-NEXT: mov v6.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: mov v5.h[1], w6
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: fmov w20, s17
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov v3.h[3], w14
+; CHECK-GI-NEXT: sxtb w14, w2
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v7.h[1], w8
+; CHECK-GI-NEXT: fmov w8, s16
+; CHECK-GI-NEXT: fmov s16, w10
+; CHECK-GI-NEXT: mov v6.h[2], w27
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: mov v4.h[3], w0
+; CHECK-GI-NEXT: sxtb w0, w4
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b18, v2.b[5]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: mov v16.h[1], w24
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov v3.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w25
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v6.h[3], w23
+; CHECK-GI-NEXT: mov v5.h[3], w0
+; CHECK-GI-NEXT: fmov w26, s22
+; CHECK-GI-NEXT: mov b19, v2.b[6]
+; CHECK-GI-NEXT: fmov w27, s18
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w28
+; CHECK-GI-NEXT: fmov w22, s23
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: fmov w6, s24
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w11
+; CHECK-GI-NEXT: sxtb w11, w21
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[3], w8
+; CHECK-GI-NEXT: sxtb w8, w16
+; CHECK-GI-NEXT: sxtb w16, w3
+; CHECK-GI-NEXT: mov v6.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v3.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w15
+; CHECK-GI-NEXT: sxtb w15, w27
+; CHECK-GI-NEXT: mov v7.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w1
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.h[4], w16
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w12
+; CHECK-GI-NEXT: sxtb w12, w18
+; CHECK-GI-NEXT: mov v4.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w19
+; CHECK-GI-NEXT: mov v5.h[5], w14
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v16.h[5], w15
+; CHECK-GI-NEXT: mov v3.h[6], w11
+; CHECK-GI-NEXT: sxtb w11, w22
+; CHECK-GI-NEXT: mov v6.h[6], w9
+; CHECK-GI-NEXT: sxtb w9, w13
+; CHECK-GI-NEXT: sxtb w13, w24
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: fmov w11, s17
+; CHECK-GI-NEXT: mov v4.h[6], w10
+; CHECK-GI-NEXT: sxtb w10, w7
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: mov v16.h[6], w13
+; CHECK-GI-NEXT: mov v3.h[7], w8
+; CHECK-GI-NEXT: sxtb w8, w6
+; CHECK-GI-NEXT: smov w12, v1.b[8]
+; CHECK-GI-NEXT: mov v6.h[7], w10
+; CHECK-GI-NEXT: sxtb w10, w17
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[7], w9
+; CHECK-GI-NEXT: mov v7.h[7], w8
+; CHECK-GI-NEXT: smov w8, v2.b[8]
+; CHECK-GI-NEXT: mov v5.h[7], w10
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[7], w11
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul w16, w12, w8
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w0, v3.h[4]
+; CHECK-GI-NEXT: sxth w16, w16
+; CHECK-GI-NEXT: smov w2, v2.h[0]
+; CHECK-GI-NEXT: smov w4, v2.h[4]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w1, v3.h[5]
+; CHECK-GI-NEXT: smov w3, v2.h[1]
+; CHECK-GI-NEXT: smov w5, v2.h[5]
+; CHECK-GI-NEXT: smov w6, v1.h[0]
+; CHECK-GI-NEXT: smov w19, v1.h[4]
+; CHECK-GI-NEXT: smov w7, v1.h[1]
+; CHECK-GI-NEXT: smov w20, v1.h[5]
+; CHECK-GI-NEXT: smov w10, v3.h[2]
+; CHECK-GI-NEXT: smov w8, v3.h[3]
+; CHECK-GI-NEXT: smov w11, v3.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w17
+; CHECK-GI-NEXT: fmov s4, w0
+; CHECK-GI-NEXT: fmov s5, w2
+; CHECK-GI-NEXT: fmov s6, w4
+; CHECK-GI-NEXT: fmov s7, w6
+; CHECK-GI-NEXT: fmov s16, w19
+; CHECK-GI-NEXT: fmov s17, w16
+; CHECK-GI-NEXT: smov w12, v2.h[2]
+; CHECK-GI-NEXT: smov w13, v2.h[6]
+; CHECK-GI-NEXT: smov w14, v1.h[2]
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w18
+; CHECK-GI-NEXT: mov v4.s[1], w1
+; CHECK-GI-NEXT: mov v5.s[1], w3
+; CHECK-GI-NEXT: mov v6.s[1], w5
+; CHECK-GI-NEXT: mov v7.s[1], w7
+; CHECK-GI-NEXT: mov v16.s[1], w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: smov w16, v2.h[3]
+; CHECK-GI-NEXT: smov w17, v2.h[7]
+; CHECK-GI-NEXT: smov w18, v1.h[3]
+; CHECK-GI-NEXT: smov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w15
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w16
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2948,349 +3331,535 @@ define i32 @test_sdot_v25i8_double(<25 x i8> %a, <25 x i8> %b, <25 x i8> %c, <25
;
; CHECK-GI-LABEL: test_sdot_v25i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp d11, d10, [sp, #-48]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #32] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
; CHECK-GI-NEXT: .cfi_offset w29, -16
-; CHECK-GI-NEXT: .cfi_offset b8, -24
-; CHECK-GI-NEXT: .cfi_offset b9, -32
-; CHECK-GI-NEXT: .cfi_offset b10, -40
-; CHECK-GI-NEXT: .cfi_offset b11, -48
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w10, w4
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w11, w2
-; CHECK-GI-NEXT: sxtb w13, w6
-; CHECK-GI-NEXT: ldr w12, [sp, #72]
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #16]
+; CHECK-GI-NEXT: lsl w10, w1, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #24]
+; CHECK-GI-NEXT: lsl w12, w4, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #56]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #64]
; CHECK-GI-NEXT: fmov s2, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #48]
-; CHECK-GI-NEXT: fmov s4, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #80]
-; CHECK-GI-NEXT: ldr w14, [sp, #128]
-; CHECK-GI-NEXT: ldr w15, [sp, #152]
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #112]
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #32]
+; CHECK-GI-NEXT: sbfx w9, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v4.h[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #152]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: mov v2.s[1], w9
-; CHECK-GI-NEXT: sxtb w9, w5
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s3, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #88]
-; CHECK-GI-NEXT: ldr x29, [sp, #32] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v4.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #56]
-; CHECK-GI-NEXT: fmov s5, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w3
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #40]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #64]
-; CHECK-GI-NEXT: mov v5.s[1], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #104]
+; CHECK-GI-NEXT: mov v4.h[2], w10
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #160]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #96]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v4.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #120]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #112]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w11
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #48]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s3, w8
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #80]
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #168]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], w11
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: mov v5.s[2], w9
-; CHECK-GI-NEXT: sxtb w9, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #144]
-; CHECK-GI-NEXT: ldr w10, [sp, #136]
-; CHECK-GI-NEXT: fmov s6, w11
-; CHECK-GI-NEXT: sxtb w11, w7
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[4], w12
+; CHECK-GI-NEXT: lsl w12, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v3.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #88]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: lsl w8, w10, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #176]
+; CHECK-GI-NEXT: mov v2.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: fmov s6, w9
+; CHECK-GI-NEXT: sbfx w15, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w10, #8
+; CHECK-GI-NEXT: mov v3.h[2], w11
+; CHECK-GI-NEXT: sbfx w11, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #96]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[5], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #224]
+; CHECK-GI-NEXT: mov v6.h[1], w15
+; CHECK-GI-NEXT: mov v2.h[6], w11
+; CHECK-GI-NEXT: lsl w15, w7, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w12, [sp, #104]
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #216]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v2.h[7], w15
+; CHECK-GI-NEXT: lsl w15, w9, #8
+; CHECK-GI-NEXT: mov v4.h[6], w14
+; CHECK-GI-NEXT: mov v6.h[2], w10
+; CHECK-GI-NEXT: lsl w10, w13, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #288]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v3.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #192]
+; CHECK-GI-NEXT: fmov s5, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #232]
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v6.h[3], w12
+; CHECK-GI-NEXT: ldr w8, [sp, #72]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #184]
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: mov v6.s[1], w9
-; CHECK-GI-NEXT: fmov s7, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w11, [sp, #160]
-; CHECK-GI-NEXT: mov v7.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #224]
-; CHECK-GI-NEXT: mov v3.s[3], w9
-; CHECK-GI-NEXT: mov v6.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #192]
-; CHECK-GI-NEXT: fmov s16, w8
-; CHECK-GI-NEXT: fmov s18, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #168]
-; CHECK-GI-NEXT: ldr w13, [sp, #208]
-; CHECK-GI-NEXT: mov v7.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #256]
-; CHECK-GI-NEXT: ldr w8, [sp, #176]
-; CHECK-GI-NEXT: mov v16.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #200]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #232]
-; CHECK-GI-NEXT: mov v6.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #248]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #240]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v18.s[2], w14
-; CHECK-GI-NEXT: fmov s17, w10
+; CHECK-GI-NEXT: mov v5.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #280]
+; CHECK-GI-NEXT: sbfx w15, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #240]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v3.h[5], w15
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #296]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #200]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v6.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #304]
+; CHECK-GI-NEXT: ldr w11, [sp, #128]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v5.h[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #208]
+; CHECK-GI-NEXT: mov v7.h[1], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #248]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v3.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w14, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v7.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #256]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #320]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v5.h[4], w13
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #312]
+; CHECK-GI-NEXT: mov v3.h[7], w10
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v4.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[3], w14
; CHECK-GI-NEXT: ldr w14, [sp, #264]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #288]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w15, [sp, #392]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #280]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #312]
-; CHECK-GI-NEXT: mov v16.s[3], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #296]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w9, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #136]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v5.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mul v16.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v6.h[6], w11
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #328]
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #336]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: smov w9, v16.h[0]
+; CHECK-GI-NEXT: smov w15, v16.h[4]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: smov w17, v16.h[5]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mov v17.s[2], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #320]
-; CHECK-GI-NEXT: fmov s20, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #344]
-; CHECK-GI-NEXT: fmov s19, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w12, [sp, #304]
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v18.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
-; CHECK-GI-NEXT: mov v19.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #328]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s21, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #336]
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v16.h[1]
+; CHECK-GI-NEXT: mov v7.h[5], w14
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: ldr w14, [sp, #344]
+; CHECK-GI-NEXT: ldr w8, [sp, #352]
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: fmov s3, w15
+; CHECK-GI-NEXT: lsl w9, w12, #8
+; CHECK-GI-NEXT: sbfx w12, w16, #8, #8
; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v17.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #376]
-; CHECK-GI-NEXT: mov v20.s[2], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #368]
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v19.s[2], w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w14, [sp, #384]
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v20.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: mul w10, w8, w10
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: mov v19.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #416]
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: ldr w9, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #432]
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #448]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v21.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #424]
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #480]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v23.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #456]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s24, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #440]
-; CHECK-GI-NEXT: mov v25.s[1], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: smov w15, v16.h[3]
+; CHECK-GI-NEXT: mov v2.s[1], w13
+; CHECK-GI-NEXT: smov w13, v16.h[2]
+; CHECK-GI-NEXT: mov v6.h[7], w12
+; CHECK-GI-NEXT: smov w12, v16.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mul v18.8h, v4.8h, v5.8h
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w9, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #368]
+; CHECK-GI-NEXT: mov v2.s[2], w13
+; CHECK-GI-NEXT: smov w13, v16.h[7]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v3.s[2], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: mul w8, w10, w14
+; CHECK-GI-NEXT: smov w10, v18.h[0]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #376]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w12, v18.h[1]
+; CHECK-GI-NEXT: mov v7.h[7], w16
+; CHECK-GI-NEXT: mov v2.s[3], w15
+; CHECK-GI-NEXT: smov w15, v18.h[4]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v3.s[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #416]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: mov v16.h[1], w11
+; CHECK-GI-NEXT: ldr w10, [sp, #424]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #384]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: fmov s5, w15
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #432]
+; CHECK-GI-NEXT: mov v4.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[5]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v7.8h, v6.8h, v7.8h
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #392]
+; CHECK-GI-NEXT: ldr w13, [sp, #400]
+; CHECK-GI-NEXT: mov v5.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: mov v16.h[3], w14
+; CHECK-GI-NEXT: ldr w10, [sp, #440]
+; CHECK-GI-NEXT: smov w14, v18.h[6]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #456]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sxth w8, w8
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v4.s[2], w12
+; CHECK-GI-NEXT: smov w12, v18.h[3]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v17.h[2], w15
+; CHECK-GI-NEXT: mov v16.h[4], w11
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[7]
+; CHECK-GI-NEXT: ldr w15, [sp, #448]
+; CHECK-GI-NEXT: ldr w11, [sp, #408]
+; CHECK-GI-NEXT: mov v4.s[3], w12
+; CHECK-GI-NEXT: smov w12, v7.h[0]
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #480]
+; CHECK-GI-NEXT: mov v16.h[5], w9
+; CHECK-GI-NEXT: lsl w9, w13, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: mov v5.s[3], w14
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: smov w14, v7.h[1]
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: fmov s6, w12
; CHECK-GI-NEXT: ldr w12, [sp, #488]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: ldr w15, [sp, #504]
-; CHECK-GI-NEXT: ldr w11, [sp, #472]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v24.s[1], w14
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v17.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #496]
+; CHECK-GI-NEXT: fmov s18, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #552]
+; CHECK-GI-NEXT: mov v6.s[1], w14
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
; CHECK-GI-NEXT: ldr w14, [sp, #464]
-; CHECK-GI-NEXT: mov v23.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w8, [sp, #408]
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #496]
-; CHECK-GI-NEXT: mov v25.s[2], w10
+; CHECK-GI-NEXT: mov v16.h[6], w9
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v18.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v17.h[5], w15
+; CHECK-GI-NEXT: sbfx w15, w10, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
; CHECK-GI-NEXT: ldr w10, [sp, #512]
-; CHECK-GI-NEXT: sxtb w9, w14
-; CHECK-GI-NEXT: ldr w14, [sp, #520]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v22.s[1], wzr
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #528]
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w13, w15
-; CHECK-GI-NEXT: fmov s27, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w15, [sp, #552]
-; CHECK-GI-NEXT: mov v25.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #544]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #560]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s28, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #616]
-; CHECK-GI-NEXT: mov v27.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #592]
-; CHECK-GI-NEXT: ldr w15, [sp, #568]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w8, [sp, #536]
-; CHECK-GI-NEXT: ldr w10, [sp, #576]
-; CHECK-GI-NEXT: mov v28.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s30, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #600]
-; CHECK-GI-NEXT: mov v27.s[2], w9
-; CHECK-GI-NEXT: mov v29.s[1], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #608]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #632]
-; CHECK-GI-NEXT: mov v28.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #640]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v29.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #664]
-; CHECK-GI-NEXT: mov v30.s[2], w13
-; CHECK-GI-NEXT: mov v28.s[3], w14
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: ldr w15, [sp, #616]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v16.h[7], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #504]
+; CHECK-GI-NEXT: mov v18.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v17.h[6], w14
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #576]
+; CHECK-GI-NEXT: mov v19.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #624]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s20, w14
; CHECK-GI-NEXT: ldr w14, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #656]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v29.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #688]
-; CHECK-GI-NEXT: fmov s31, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: mov v30.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s8, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w14, [sp, #712]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[1], w13
+; CHECK-GI-NEXT: mov v18.h[3], w16
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v19.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v20.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #688]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v18.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #584]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: fmov s21, w14
+; CHECK-GI-NEXT: ldr w15, [sp, #640]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v20.h[2], w13
; CHECK-GI-NEXT: ldr w13, [sp, #696]
-; CHECK-GI-NEXT: mov v8.s[1], w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w9, [sp, #720]
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #776]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s10, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v22.s[2], wzr
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #704]
-; CHECK-GI-NEXT: mov v9.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #760]
-; CHECK-GI-NEXT: mov v8.s[2], w13
-; CHECK-GI-NEXT: mul w10, w10, w11
-; CHECK-GI-NEXT: mov v10.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #728]
-; CHECK-GI-NEXT: sxtb w11, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v20.4s
-; CHECK-GI-NEXT: mul v7.4s, v7.4s, v21.4s
-; CHECK-GI-NEXT: mul v18.4s, v25.4s, v30.4s
-; CHECK-GI-NEXT: mov v22.s[3], wzr
-; CHECK-GI-NEXT: fmov s11, w10
-; CHECK-GI-NEXT: mov v9.s[2], w11
-; CHECK-GI-NEXT: ldr w10, [sp, #768]
-; CHECK-GI-NEXT: mov v8.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #672]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v21.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #592]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v19.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #704]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.h[3], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #648]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v21.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #600]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v19.h[5], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #656]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v21.h[3], w10
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #608]
+; CHECK-GI-NEXT: mov v20.h[4], w15
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #528]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: mov v19.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #720]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w16, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v20.h[5], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #728]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v19.h[7], w16
+; CHECK-GI-NEXT: ldr w9, [sp, #472]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v18.h[6], w14
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v21.h[5], w13
+; CHECK-GI-NEXT: ldr w15, [sp, #672]
+; CHECK-GI-NEXT: ldr w11, [sp, #536]
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v20.h[6], w14
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v19.8h, v16.8h, v19.8h
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v21.h[6], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v7.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w12, v7.h[4]
+; CHECK-GI-NEXT: mov v17.h[7], w9
+; CHECK-GI-NEXT: mov v20.h[7], w15
+; CHECK-GI-NEXT: smov w9, v7.h[5]
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w11, v19.h[4]
+; CHECK-GI-NEXT: ldr w15, [sp, #744]
+; CHECK-GI-NEXT: mov v21.h[7], w13
+; CHECK-GI-NEXT: mov v6.s[2], w14
+; CHECK-GI-NEXT: smov w14, v19.h[0]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w13, v19.h[5]
+; CHECK-GI-NEXT: smov w12, v19.h[1]
+; CHECK-GI-NEXT: mul v20.8h, v17.8h, v20.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #544]
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: mul v22.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: fmov s18, w11
+; CHECK-GI-NEXT: mov v16.s[1], w9
+; CHECK-GI-NEXT: fmov s17, w14
+; CHECK-GI-NEXT: smov w14, v7.h[6]
+; CHECK-GI-NEXT: smov w11, v19.h[2]
+; CHECK-GI-NEXT: smov w9, v7.h[3]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mla v5.4s, v3.4s, v17.4s
-; CHECK-GI-NEXT: mov v11.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: mov v9.s[3], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mla v7.4s, v6.4s, v19.4s
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v20.4s, v26.4s, v8.4s
-; CHECK-GI-NEXT: mla v18.4s, v23.4s, v29.4s
-; CHECK-GI-NEXT: mov v31.s[3], w9
-; CHECK-GI-NEXT: add v1.4s, v22.4s, v1.4s
-; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mov v11.s[2], wzr
-; CHECK-GI-NEXT: mov v10.s[3], w8
-; CHECK-GI-NEXT: mul v21.4s, v28.4s, v9.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v7.4s, v1.4s
-; CHECK-GI-NEXT: mla v20.4s, v24.4s, v31.4s
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: mla v21.4s, v27.4s, v10.4s
+; CHECK-GI-NEXT: fmov s21, w8
+; CHECK-GI-NEXT: mov v18.s[1], w13
+; CHECK-GI-NEXT: sxtb w13, w15
+; CHECK-GI-NEXT: smov w15, v20.h[0]
+; CHECK-GI-NEXT: mov v17.s[1], w12
+; CHECK-GI-NEXT: smov w8, v7.h[7]
+; CHECK-GI-NEXT: smov w12, v19.h[6]
+; CHECK-GI-NEXT: mov v16.s[2], w14
+; CHECK-GI-NEXT: smov w14, v20.h[1]
+; CHECK-GI-NEXT: mul w10, w10, w13
+; CHECK-GI-NEXT: smov w13, v20.h[4]
+; CHECK-GI-NEXT: smov w16, v20.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
+; CHECK-GI-NEXT: fmov s7, w15
+; CHECK-GI-NEXT: smov w15, v20.h[2]
+; CHECK-GI-NEXT: mov v6.s[3], w9
+; CHECK-GI-NEXT: mov v17.s[2], w11
+; CHECK-GI-NEXT: smov w11, v22.h[0]
+; CHECK-GI-NEXT: sxth w10, w10
+; CHECK-GI-NEXT: mov v18.s[2], w12
+; CHECK-GI-NEXT: smov w12, v22.h[1]
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v7.s[1], w14
+; CHECK-GI-NEXT: smov w14, v22.h[4]
+; CHECK-GI-NEXT: fmov s23, w13
+; CHECK-GI-NEXT: smov w13, v22.h[5]
+; CHECK-GI-NEXT: fmov s26, w10
+; CHECK-GI-NEXT: smov w10, v19.h[7]
+; CHECK-GI-NEXT: fmov s24, w11
+; CHECK-GI-NEXT: smov w11, v20.h[6]
+; CHECK-GI-NEXT: mov v21.s[2], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w16
+; CHECK-GI-NEXT: add v4.4s, v6.4s, v16.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: fmov s25, w14
+; CHECK-GI-NEXT: smov w14, v22.h[2]
+; CHECK-GI-NEXT: mov v26.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], w12
+; CHECK-GI-NEXT: smov w12, v19.h[3]
+; CHECK-GI-NEXT: mov v7.s[2], w15
+; CHECK-GI-NEXT: smov w15, v20.h[3]
+; CHECK-GI-NEXT: mov v18.s[3], w10
+; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: mov v25.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[6]
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: smov w11, v20.h[7]
+; CHECK-GI-NEXT: mov v26.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], w14
+; CHECK-GI-NEXT: smov w14, v22.h[3]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: mov v7.s[3], w15
+; CHECK-GI-NEXT: add v1.4s, v21.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[2], w13
+; CHECK-GI-NEXT: smov w13, v22.h[7]
+; CHECK-GI-NEXT: mov v23.s[3], w11
+; CHECK-GI-NEXT: mov v26.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], w14
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v18.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[3], w13
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v23.4s
+; CHECK-GI-NEXT: add v0.4s, v26.4s, v0.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: add v3.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v11.4s, v0.4s
+; CHECK-GI-NEXT: add v7.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: add v3.4s, v5.4s, v6.4s
; CHECK-GI-NEXT: addv s1, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v7.4s, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: ldp d11, d10, [sp], #48 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <25 x i8> %a to <25 x i32>
@@ -3972,197 +4541,412 @@ define i32 @test_udot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s6, wzr
; CHECK-GI-NEXT: fmov s0, wzr
; CHECK-GI-NEXT: fmov s1, wzr
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: umov w8, v21.b[0]
-; CHECK-GI-NEXT: umov w9, v21.b[4]
-; CHECK-GI-NEXT: umov w10, v21.b[1]
-; CHECK-GI-NEXT: umov w13, v21.b[8]
-; CHECK-GI-NEXT: umov w11, v21.b[5]
-; CHECK-GI-NEXT: umov w14, v21.b[9]
-; CHECK-GI-NEXT: umov w15, v25.b[0]
-; CHECK-GI-NEXT: umov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: mov b29, v7.b[13]
+; CHECK-GI-NEXT: mov b30, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[7]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w13, s19
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b17, v16.b[2]
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w23, s21
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v16.b[1]
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: fmov w5, s17
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b21, v16.b[5]
+; CHECK-GI-NEXT: fmov s17, w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: fmov w20, s22
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w16
+; CHECK-GI-NEXT: mov b19, v16.b[3]
+; CHECK-GI-NEXT: mov b22, v16.b[4]
+; CHECK-GI-NEXT: mov b20, v16.b[6]
+; CHECK-GI-NEXT: fmov w21, s27
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: fmov w24, s28
+; CHECK-GI-NEXT: mov b24, v16.b[8]
+; CHECK-GI-NEXT: fmov w22, s29
+; CHECK-GI-NEXT: mov b26, v16.b[9]
+; CHECK-GI-NEXT: fmov w4, s30
+; CHECK-GI-NEXT: uxtb w10, w21
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w13
+; CHECK-GI-NEXT: str s20, [sp] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b25, v16.b[10]
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b27, v16.b[11]
+; CHECK-GI-NEXT: mov b28, v16.b[12]
+; CHECK-GI-NEXT: mov b29, v16.b[13]
+; CHECK-GI-NEXT: mov b30, v16.b[14]
+; CHECK-GI-NEXT: fmov w26, s16
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w27
+; CHECK-GI-NEXT: mov b31, v16.b[15]
+; CHECK-GI-NEXT: ldp q18, q16, [x0]
+; CHECK-GI-NEXT: fmov w2, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov v17.h[2], w9
+; CHECK-GI-NEXT: fmov w14, s22
+; CHECK-GI-NEXT: fmov w3, s25
+; CHECK-GI-NEXT: fmov w15, s19
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: uxtb w8, w23
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: mov b25, v18.b[5]
+; CHECK-GI-NEXT: mov b23, v18.b[6]
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: uxtb w3, w3
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w24
+; CHECK-GI-NEXT: uxtb w24, w7
+; CHECK-GI-NEXT: mov b19, v18.b[3]
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w20
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v18.b[10]
+; CHECK-GI-NEXT: fmov w9, s22
+; CHECK-GI-NEXT: fmov w6, s26
+; CHECK-GI-NEXT: mov v17.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w25
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b26, v18.b[4]
+; CHECK-GI-NEXT: fmov w18, s28
+; CHECK-GI-NEXT: fmov w16, s29
+; CHECK-GI-NEXT: mov v7.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s18
+; CHECK-GI-NEXT: mov b24, v18.b[7]
+; CHECK-GI-NEXT: fmov w30, s21
+; CHECK-GI-NEXT: mov b20, v18.b[8]
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: uxtb w16, w16
+; CHECK-GI-NEXT: mov b28, v18.b[11]
+; CHECK-GI-NEXT: mov b29, v18.b[12]
+; CHECK-GI-NEXT: fmov w23, s25
+; CHECK-GI-NEXT: mov b25, v18.b[13]
+; CHECK-GI-NEXT: fmov w21, s23
+; CHECK-GI-NEXT: mov v7.h[7], w24
+; CHECK-GI-NEXT: uxtb w24, w8
+; CHECK-GI-NEXT: uxtb w8, w9
+; CHECK-GI-NEXT: uxtb w9, w29
+; CHECK-GI-NEXT: mov b23, v18.b[14]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: fmov s21, w24
+; CHECK-GI-NEXT: fmov s18, w26
+; CHECK-GI-NEXT: fmov w28, s19
+; CHECK-GI-NEXT: mov b19, v16.b[1]
+; CHECK-GI-NEXT: mov v17.h[5], w22
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov v21.h[1], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov v18.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w5
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: fmov w24, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w4, s22
+; CHECK-GI-NEXT: mov b22, v16.b[8]
+; CHECK-GI-NEXT: mov v17.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s19
+; CHECK-GI-NEXT: fmov s19, w19
+; CHECK-GI-NEXT: mov v21.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w28
+; CHECK-GI-NEXT: mov v18.h[2], w10
+; CHECK-GI-NEXT: uxtb w10, w6
+; CHECK-GI-NEXT: mov b27, v16.b[9]
+; CHECK-GI-NEXT: fmov w20, s24
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b24, v16.b[3]
+; CHECK-GI-NEXT: fmov w5, s20
+; CHECK-GI-NEXT: mov v19.h[1], w10
+; CHECK-GI-NEXT: fmov w10, s23
+; CHECK-GI-NEXT: fmov s23, w7
+; CHECK-GI-NEXT: mov v21.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w7, s22
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: mov b20, v16.b[10]
+; CHECK-GI-NEXT: fmov w22, s28
+; CHECK-GI-NEXT: fmov w25, s25
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b25, v16.b[5]
+; CHECK-GI-NEXT: fmov w29, s24
+; CHECK-GI-NEXT: fmov s22, w7
+; CHECK-GI-NEXT: mov v23.h[2], w27
+; CHECK-GI-NEXT: mov b24, v16.b[11]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: fmov w26, s29
+; CHECK-GI-NEXT: mov b26, v16.b[4]
+; CHECK-GI-NEXT: mov v19.h[2], w3
+; CHECK-GI-NEXT: uxtb w3, w29
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[1], w11
+; CHECK-GI-NEXT: uxtb w11, w15
+; CHECK-GI-NEXT: uxtb w15, w22
+; CHECK-GI-NEXT: uxtb w22, w23
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: fmov w6, s25
+; CHECK-GI-NEXT: mov v18.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: mov v23.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w17
+; CHECK-GI-NEXT: uxtb w17, w21
+; CHECK-GI-NEXT: mov b25, v16.b[12]
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v21.h[5], w22
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: uxtb w11, w14
+; CHECK-GI-NEXT: uxtb w14, w26
+; CHECK-GI-NEXT: mov v20.h[2], w5
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w19, s26
+; CHECK-GI-NEXT: mov v18.h[4], w11
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v23.h[4], w14
+; CHECK-GI-NEXT: uxtb w14, w25
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b26, v16.b[13]
+; CHECK-GI-NEXT: fmov w7, s25
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w18
+; CHECK-GI-NEXT: uxtb w18, w19
+; CHECK-GI-NEXT: mov v21.h[6], w17
+; CHECK-GI-NEXT: uxtb w17, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w2
+; CHECK-GI-NEXT: mov v20.h[3], w3
+; CHECK-GI-NEXT: mov v23.h[5], w14
+; CHECK-GI-NEXT: uxtb w14, w24
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w7
+; CHECK-GI-NEXT: fmov w8, s26
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v21.h[7], w17
+; CHECK-GI-NEXT: uxtb w17, w6
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w15, w15
+; CHECK-GI-NEXT: fmov w13, s30
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v20.h[4], w18
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v19.h[5], w16
+; CHECK-GI-NEXT: mov b27, v16.b[14]
+; CHECK-GI-NEXT: mul v24.8h, v7.8h, v21.8h
+; CHECK-GI-NEXT: mov v22.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov b7, v16.b[7]
+; CHECK-GI-NEXT: mov b16, v16.b[15]
+; CHECK-GI-NEXT: fmov w12, s31
+; CHECK-GI-NEXT: mov v17.h[7], w11
+; CHECK-GI-NEXT: uxtb w11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v20.h[5], w17
+; CHECK-GI-NEXT: mov v23.h[7], w8
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov v18.h[6], w15
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov v19.h[6], w11
+; CHECK-GI-NEXT: fmov w12, s16
+; CHECK-GI-NEXT: fmov w11, s7
; CHECK-GI-NEXT: fmov s4, wzr
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v20.h[6], w10
+; CHECK-GI-NEXT: umov w10, v24.h[0]
+; CHECK-GI-NEXT: mul v21.8h, v17.8h, v23.8h
+; CHECK-GI-NEXT: mov v18.h[7], w13
; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v19.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: umov w12, v24.h[4]
+; CHECK-GI-NEXT: mov v22.h[6], w9
+; CHECK-GI-NEXT: umov w9, v24.h[1]
+; CHECK-GI-NEXT: mov v20.h[7], w11
+; CHECK-GI-NEXT: umov w11, v24.h[5]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldrb w10, [x1, #32]
+; CHECK-GI-NEXT: umov w13, v21.h[0]
+; CHECK-GI-NEXT: umov w14, v21.h[1]
+; CHECK-GI-NEXT: umov w15, v21.h[4]
; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: umov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: umov w9, v21.b[3]
-; CHECK-GI-NEXT: umov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: umov w15, v25.b[4]
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: umov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: umov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: mov v22.h[7], w8
+; CHECK-GI-NEXT: umov w12, v24.h[6]
+; CHECK-GI-NEXT: umov w8, v24.h[2]
+; CHECK-GI-NEXT: mov v7.s[1], w9
+; CHECK-GI-NEXT: ldrb w9, [x0, #32]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mul v23.8h, v18.8h, v20.8h
+; CHECK-GI-NEXT: umov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v16.s[1], w11
+; CHECK-GI-NEXT: umov w11, v21.h[5]
+; CHECK-GI-NEXT: fmov s18, w15
+; CHECK-GI-NEXT: mul v19.8h, v19.8h, v22.8h
+; CHECK-GI-NEXT: umov w15, v21.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: mov v17.s[1], w14
+; CHECK-GI-NEXT: umov w14, v21.h[2]
+; CHECK-GI-NEXT: mov v7.s[2], w8
+; CHECK-GI-NEXT: mul w8, w10, w9
+; CHECK-GI-NEXT: umov w9, v23.h[0]
+; CHECK-GI-NEXT: umov w10, v23.h[1]
+; CHECK-GI-NEXT: mov v16.s[2], w12
+; CHECK-GI-NEXT: umov w12, v21.h[3]
+; CHECK-GI-NEXT: mov v18.s[1], w11
+; CHECK-GI-NEXT: umov w11, v23.h[4]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: umov w15, v25.b[13]
+; CHECK-GI-NEXT: mov v17.s[2], w14
+; CHECK-GI-NEXT: umov w14, v23.h[5]
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: umov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: umov w8, v21.b[11]
-; CHECK-GI-NEXT: umov w10, v21.b[14]
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: umov w9, v19.h[1]
; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: umov w11, v25.b[2]
+; CHECK-GI-NEXT: mov v16.s[3], w13
+; CHECK-GI-NEXT: umov w13, v19.h[0]
+; CHECK-GI-NEXT: mov v18.s[2], w15
+; CHECK-GI-NEXT: umov w15, v21.h[7]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: umov w11, v23.h[2]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: umov w12, v19.h[4]
+; CHECK-GI-NEXT: mov v20.s[1], w10
+; CHECK-GI-NEXT: umov w10, v23.h[3]
; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: umov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: umov w14, v21.b[15]
-; CHECK-GI-NEXT: umov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: umov w13, v25.b[12]
+; CHECK-GI-NEXT: umov w16, v24.h[3]
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: umov w13, v19.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: umov w14, v23.h[6]
+; CHECK-GI-NEXT: mov v18.s[3], w15
+; CHECK-GI-NEXT: umov w15, v19.h[2]
+; CHECK-GI-NEXT: mov v20.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[6]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: umov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: umov w8, v26.b[5]
-; CHECK-GI-NEXT: umov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v22.s[1], w9
+; CHECK-GI-NEXT: umov w9, v23.h[7]
+; CHECK-GI-NEXT: fmov s23, w12
+; CHECK-GI-NEXT: umov w12, v19.h[3]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: umov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: umov w9, v25.b[10]
-; CHECK-GI-NEXT: umov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: umov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: umov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: umov w12, v26.b[4]
-; CHECK-GI-NEXT: umov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: umov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: umov w13, v26.b[9]
+; CHECK-GI-NEXT: mov v21.s[2], w14
; CHECK-GI-NEXT: mov v2.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: umov w15, v25.b[15]
+; CHECK-GI-NEXT: mov v23.s[1], w13
; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[6]
-; CHECK-GI-NEXT: umov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: umov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: umov w10, v26.b[3]
; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: umov w15, v22.b[0]
-; CHECK-GI-NEXT: umov w8, v26.b[7]
+; CHECK-GI-NEXT: mov v22.s[2], w15
+; CHECK-GI-NEXT: mov v7.s[3], w16
+; CHECK-GI-NEXT: mov v20.s[3], w10
; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: umov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: umov w10, v22.b[1]
-; CHECK-GI-NEXT: umov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: umov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: umov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: umov w15, v22.b[12]
-; CHECK-GI-NEXT: umov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[13]
+; CHECK-GI-NEXT: mov v21.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: umov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: umov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: umov w15, v26.b[15]
-; CHECK-GI-NEXT: umov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: umov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrb w14, [x1, #32]
-; CHECK-GI-NEXT: umov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[7]
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: mov v22.s[3], w12
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: umov w9, v22.b[7]
-; CHECK-GI-NEXT: umov w11, v22.b[3]
; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v7.4s, v17.4s, v18.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: umov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: add v16.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: mov v23.s[3], w11
; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: umov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: add v17.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v0.4s, v19.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4359,197 +5143,412 @@ define i32 @test_sdot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
-; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
-; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: smov w8, v21.b[0]
-; CHECK-GI-NEXT: smov w9, v21.b[4]
-; CHECK-GI-NEXT: smov w10, v21.b[1]
-; CHECK-GI-NEXT: smov w13, v21.b[8]
-; CHECK-GI-NEXT: smov w11, v21.b[5]
-; CHECK-GI-NEXT: smov w14, v21.b[9]
-; CHECK-GI-NEXT: smov w15, v25.b[0]
-; CHECK-GI-NEXT: smov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: smov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: smov w9, v21.b[3]
-; CHECK-GI-NEXT: smov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: smov w15, v25.b[4]
-; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: smov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: smov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s6, wzr
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: fmov w14, s19
+; CHECK-GI-NEXT: mov b19, v7.b[13]
+; CHECK-GI-NEXT: mov b29, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b30, v16.b[1]
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w24, s21
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w22, s22
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov b17, v16.b[3]
+; CHECK-GI-NEXT: mov b21, v16.b[4]
+; CHECK-GI-NEXT: mov b18, v16.b[5]
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: fmov w25, s27
+; CHECK-GI-NEXT: mov b22, v16.b[7]
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w16
+; CHECK-GI-NEXT: mov b25, v16.b[8]
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b24, v16.b[9]
+; CHECK-GI-NEXT: fmov w5, s29
+; CHECK-GI-NEXT: mov b26, v16.b[10]
+; CHECK-GI-NEXT: mov b19, v16.b[11]
+; CHECK-GI-NEXT: fmov w6, s30
+; CHECK-GI-NEXT: mov b27, v16.b[12]
+; CHECK-GI-NEXT: mov b28, v16.b[13]
+; CHECK-GI-NEXT: mov b29, v16.b[14]
+; CHECK-GI-NEXT: sxtb w30, w23
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: fmov w20, s16
+; CHECK-GI-NEXT: mov b30, v16.b[15]
+; CHECK-GI-NEXT: fmov s16, w8
+; CHECK-GI-NEXT: sxtb w8, w12
+; CHECK-GI-NEXT: fmov w15, s17
+; CHECK-GI-NEXT: fmov w11, s18
+; CHECK-GI-NEXT: ldp q18, q17, [x0]
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w27
+; CHECK-GI-NEXT: fmov w18, s20
+; CHECK-GI-NEXT: sxtb w15, w15
+; CHECK-GI-NEXT: mov v16.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w25
+; CHECK-GI-NEXT: mov b20, v18.b[3]
+; CHECK-GI-NEXT: fmov w2, s22
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: sxtb w18, w18
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: fmov w3, s19
+; CHECK-GI-NEXT: mov b19, v18.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w24
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w28, s20
+; CHECK-GI-NEXT: mov b20, v18.b[11]
+; CHECK-GI-NEXT: fmov w8, s22
+; CHECK-GI-NEXT: mov b25, v18.b[8]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov v7.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w22
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: mov b24, v18.b[4]
+; CHECK-GI-NEXT: mov b23, v18.b[5]
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: sxtb w23, w26
+; CHECK-GI-NEXT: mov v7.h[6], w9
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: mov b19, v18.b[14]
+; CHECK-GI-NEXT: fmov w10, s25
+; CHECK-GI-NEXT: fmov w4, s26
+; CHECK-GI-NEXT: fmov w16, s28
+; CHECK-GI-NEXT: mov b26, v18.b[7]
+; CHECK-GI-NEXT: mov b28, v18.b[10]
+; CHECK-GI-NEXT: fmov w27, s24
+; CHECK-GI-NEXT: mov b24, v18.b[12]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v7.h[7], w7
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: sxtb w4, w4
+; CHECK-GI-NEXT: fmov s20, w23
+; CHECK-GI-NEXT: fmov w25, s23
+; CHECK-GI-NEXT: mov b23, v18.b[13]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: mov v16.h[5], w30
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov b21, v17.b[1]
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: sxtb w20, w6
+; CHECK-GI-NEXT: fmov w6, s19
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov b28, v17.b[8]
+; CHECK-GI-NEXT: fmov s18, w8
+; CHECK-GI-NEXT: sxtb w8, w21
+; CHECK-GI-NEXT: mov v16.h[6], w5
+; CHECK-GI-NEXT: fmov w5, s22
+; CHECK-GI-NEXT: fmov s22, w10
+; CHECK-GI-NEXT: sxtb w10, w27
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: mov v20.h[2], w29
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: sxtb w8, w28
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: sxtb w19, w9
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: mov b25, v17.b[2]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v17.b[9]
+; CHECK-GI-NEXT: mov v22.h[1], w19
+; CHECK-GI-NEXT: fmov w23, s23
+; CHECK-GI-NEXT: mov v20.h[3], w8
+; CHECK-GI-NEXT: mov b23, v17.b[6]
+; CHECK-GI-NEXT: fmov w30, s24
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v17.b[5]
+; CHECK-GI-NEXT: mov v18.h[1], w20
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: mov b25, v17.b[10]
+; CHECK-GI-NEXT: mov v19.h[1], w28
+; CHECK-GI-NEXT: sxtb w28, w29
+; CHECK-GI-NEXT: mov v22.h[2], w26
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov v20.h[4], w10
+; CHECK-GI-NEXT: fmov w10, s28
+; CHECK-GI-NEXT: fmov s21, w27
+; CHECK-GI-NEXT: sxtb w21, w21
+; CHECK-GI-NEXT: mov b27, v17.b[3]
+; CHECK-GI-NEXT: fmov w19, s23
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: fmov w22, s26
+; CHECK-GI-NEXT: mov b26, v17.b[4]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v21.h[1], w28
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b24, v17.b[11]
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v18.h[2], w18
+; CHECK-GI-NEXT: sxtb w18, w25
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: fmov w20, s27
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w9, s26
+; CHECK-GI-NEXT: mov b26, v17.b[12]
+; CHECK-GI-NEXT: sxtb w25, w27
+; CHECK-GI-NEXT: mov v20.h[5], w18
+; CHECK-GI-NEXT: sxtb w18, w3
+; CHECK-GI-NEXT: sxtb w3, w24
+; CHECK-GI-NEXT: mov v23.h[1], w26
+; CHECK-GI-NEXT: mov v21.h[2], w21
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v22.h[3], w7
+; CHECK-GI-NEXT: sxtb w7, w20
+; CHECK-GI-NEXT: mov v19.h[2], w4
+; CHECK-GI-NEXT: sxtb w4, w30
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[3], w15
+; CHECK-GI-NEXT: sxtb w20, w28
+; CHECK-GI-NEXT: sxtb w15, w17
+; CHECK-GI-NEXT: sxtb w17, w22
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v23.h[2], w25
+; CHECK-GI-NEXT: mov v20.h[6], w3
+; CHECK-GI-NEXT: mov v21.h[3], w7
+; CHECK-GI-NEXT: fmov w10, s26
+; CHECK-GI-NEXT: mov v22.h[4], w4
+; CHECK-GI-NEXT: mov v19.h[3], w18
+; CHECK-GI-NEXT: sxtb w18, w23
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b27, v17.b[13]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[3], w20
+; CHECK-GI-NEXT: mov v18.h[4], w13
+; CHECK-GI-NEXT: sxtb w13, w6
+; CHECK-GI-NEXT: mov v20.h[7], w17
+; CHECK-GI-NEXT: mov v21.h[4], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[5], w18
+; CHECK-GI-NEXT: mov b25, v17.b[14]
+; CHECK-GI-NEXT: fmov w26, s27
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: fmov w14, s29
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v23.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w11
+; CHECK-GI-NEXT: sxtb w11, w16
+; CHECK-GI-NEXT: mov v21.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w15, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v18.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v20.8h, v7.8h, v20.8h
+; CHECK-GI-NEXT: mov b7, v17.b[7]
+; CHECK-GI-NEXT: mov v22.h[6], w13
+; CHECK-GI-NEXT: sxtb w13, w5
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v19.h[5], w11
+; CHECK-GI-NEXT: sxtb w11, w2
+; CHECK-GI-NEXT: mov b17, v17.b[15]
+; CHECK-GI-NEXT: mov v18.h[6], w8
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: mov v23.h[5], w15
+; CHECK-GI-NEXT: mov v21.h[6], w10
+; CHECK-GI-NEXT: sxtb w14, w27
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: mov v22.h[7], w13
+; CHECK-GI-NEXT: fmov w12, s30
+; CHECK-GI-NEXT: mov v19.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: smov w10, v20.h[0]
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w13, v20.h[1]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: smov w11, v20.h[4]
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mul v22.8h, v16.8h, v22.8h
+; CHECK-GI-NEXT: smov w14, v20.h[3]
+; CHECK-GI-NEXT: mov v21.h[7], w8
+; CHECK-GI-NEXT: ldrsb w8, [x0, #32]
+; CHECK-GI-NEXT: mov v19.h[7], w12
+; CHECK-GI-NEXT: mov v23.h[7], w9
+; CHECK-GI-NEXT: ldrsb w9, [x1, #32]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: smov w10, v20.h[2]
+; CHECK-GI-NEXT: smov w12, v20.h[5]
+; CHECK-GI-NEXT: fmov s16, w11
+; CHECK-GI-NEXT: mul w9, w9, w8
+; CHECK-GI-NEXT: smov w15, v22.h[4]
+; CHECK-GI-NEXT: smov w17, v22.h[5]
+; CHECK-GI-NEXT: mul v24.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: mov v7.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[0]
+; CHECK-GI-NEXT: mul v18.8h, v19.8h, v23.8h
+; CHECK-GI-NEXT: smov w16, v22.h[1]
+; CHECK-GI-NEXT: smov w8, v20.h[7]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: mov v16.s[1], w12
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: smov w15, v22.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: smov w11, v24.h[0]
+; CHECK-GI-NEXT: mov v7.s[2], w10
+; CHECK-GI-NEXT: smov w10, v20.h[6]
+; CHECK-GI-NEXT: smov w12, v24.h[1]
+; CHECK-GI-NEXT: smov w0, v18.h[4]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mov v19.s[1], w17
+; CHECK-GI-NEXT: smov w17, v18.h[0]
+; CHECK-GI-NEXT: smov w18, v18.h[1]
+; CHECK-GI-NEXT: smov w13, v22.h[2]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: smov w15, v25.b[13]
+; CHECK-GI-NEXT: fmov s20, w11
+; CHECK-GI-NEXT: smov w11, v24.h[4]
+; CHECK-GI-NEXT: mov v7.s[3], w14
+; CHECK-GI-NEXT: smov w14, v24.h[5]
+; CHECK-GI-NEXT: mov v17.s[1], w16
+; CHECK-GI-NEXT: smov w16, v24.h[2]
+; CHECK-GI-NEXT: mov v19.s[2], w15
+; CHECK-GI-NEXT: smov w15, v18.h[5]
+; CHECK-GI-NEXT: fmov s23, w0
+; CHECK-GI-NEXT: mov v20.s[1], w12
+; CHECK-GI-NEXT: mov v16.s[2], w10
+; CHECK-GI-NEXT: smov w10, v22.h[3]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: smov w11, v22.h[7]
+; CHECK-GI-NEXT: fmov s22, w17
+; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: smov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: smov w8, v21.b[11]
-; CHECK-GI-NEXT: smov w10, v21.b[14]
-; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: smov w11, v25.b[2]
-; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: smov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: smov w14, v21.b[15]
-; CHECK-GI-NEXT: smov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: smov w13, v25.b[12]
-; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: smov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: smov w8, v26.b[5]
-; CHECK-GI-NEXT: smov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w15
+; CHECK-GI-NEXT: smov w15, v18.h[6]
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: smov w14, v24.h[6]
+; CHECK-GI-NEXT: mov v20.s[2], w16
+; CHECK-GI-NEXT: mov v22.s[1], w18
+; CHECK-GI-NEXT: smov w16, v18.h[2]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: smov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: smov w9, v25.b[10]
-; CHECK-GI-NEXT: smov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: smov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: smov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: smov w12, v26.b[4]
-; CHECK-GI-NEXT: smov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: smov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: smov w13, v26.b[9]
; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
+; CHECK-GI-NEXT: mov v6.s[2], wzr
; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: smov w15, v25.b[15]
-; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[6]
-; CHECK-GI-NEXT: smov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: smov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: smov w10, v26.b[3]
-; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: smov w15, v22.b[0]
-; CHECK-GI-NEXT: smov w8, v26.b[7]
-; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[3]
+; CHECK-GI-NEXT: smov w15, v18.h[7]
+; CHECK-GI-NEXT: fmov s18, w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[2], w13
+; CHECK-GI-NEXT: smov w12, v24.h[3]
+; CHECK-GI-NEXT: smov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v22.s[2], w16
+; CHECK-GI-NEXT: mov v0.s[2], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: smov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: smov w10, v22.b[1]
-; CHECK-GI-NEXT: smov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: smov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: smov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: smov w15, v22.b[12]
-; CHECK-GI-NEXT: smov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[13]
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: smov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: smov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: smov w15, v26.b[15]
-; CHECK-GI-NEXT: smov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: smov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrsb w14, [x1, #32]
-; CHECK-GI-NEXT: smov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrsb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: smov w9, v22.b[7]
-; CHECK-GI-NEXT: smov w11, v22.b[3]
-; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v17.s[3], w10
+; CHECK-GI-NEXT: mov v19.s[3], w11
+; CHECK-GI-NEXT: mov v20.s[3], w12
+; CHECK-GI-NEXT: mov v21.s[3], w13
+; CHECK-GI-NEXT: mov v22.s[3], w14
+; CHECK-GI-NEXT: mov v23.s[3], w15
+; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: smov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: smov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
+; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: add v4.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v19.4s
+; CHECK-GI-NEXT: add v6.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: add v7.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v18.s[3], wzr
+; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v0.4s, v18.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4845,13 +5844,12 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
;
; CHECK-GI-LABEL: test_sdot_v33i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub sp, sp, #96
-; CHECK-GI-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #80] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
; CHECK-GI-NEXT: .cfi_offset w29, -16
; CHECK-GI-NEXT: .cfi_offset b8, -24
; CHECK-GI-NEXT: .cfi_offset b9, -32
@@ -4861,508 +5859,762 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
; CHECK-GI-NEXT: .cfi_offset b13, -64
; CHECK-GI-NEXT: .cfi_offset b14, -72
; CHECK-GI-NEXT: .cfi_offset b15, -80
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w10, w2
-; CHECK-GI-NEXT: sxtb w11, w4
-; CHECK-GI-NEXT: sxtb w12, w5
-; CHECK-GI-NEXT: sxtb w13, w7
-; CHECK-GI-NEXT: fmov s28, w8
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #80]
+; CHECK-GI-NEXT: lsl w11, w1, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #88]
+; CHECK-GI-NEXT: ldr w13, [sp, #128]
+; CHECK-GI-NEXT: ldr w14, [sp, #136]
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s22, w12
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #152]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w16, w7, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: mov v22.h[1], w8
; CHECK-GI-NEXT: ldr w8, [sp, #96]
-; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: sxtb w11, w6
-; CHECK-GI-NEXT: ldr w14, [sp, #528]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s18, wzr
-; CHECK-GI-NEXT: fmov s20, wzr
-; CHECK-GI-NEXT: mov v28.s[1], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #176]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v23.h[1], w9
; CHECK-GI-NEXT: ldr w9, [sp, #104]
-; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-GI-NEXT: fmov s24, w8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #224]
+; CHECK-GI-NEXT: mov v22.h[2], w10
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w4, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v23.h[2], w8
; CHECK-GI-NEXT: ldr w8, [sp, #112]
-; CHECK-GI-NEXT: mov v25.s[1], w12
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #136]
-; CHECK-GI-NEXT: mov v18.s[1], wzr
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v20.s[1], wzr
; CHECK-GI-NEXT: fmov s19, wzr
-; CHECK-GI-NEXT: mov v28.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w3
-; CHECK-GI-NEXT: mov v24.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #128]
-; CHECK-GI-NEXT: mov v25.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #168]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s21, wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[2], wzr
-; CHECK-GI-NEXT: mov v28.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #160]
-; CHECK-GI-NEXT: mov v24.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #120]
-; CHECK-GI-NEXT: fmov s30, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #144]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #200]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[1], wzr
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: mov v30.s[1], w12
-; CHECK-GI-NEXT: ldr w10, [sp, #176]
-; CHECK-GI-NEXT: mov v24.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #224]
-; CHECK-GI-NEXT: ldr w12, [sp, #152]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v22.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #192]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #232]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s23, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #240]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], wzr
-; CHECK-GI-NEXT: mov v20.s[3], wzr
-; CHECK-GI-NEXT: mov v22.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #184]
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: mov v23.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #256]
-; CHECK-GI-NEXT: ldr w11, [sp, #208]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v30.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #264]
-; CHECK-GI-NEXT: mov v26.s[1], w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v22.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #296]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w10
-; CHECK-GI-NEXT: mov v23.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #248]
-; CHECK-GI-NEXT: mov v26.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #304]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: fmov s31, w9
-; CHECK-GI-NEXT: mov v29.s[1], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #312]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s16, wzr
-; CHECK-GI-NEXT: mov v31.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #328]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w11
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #336]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #368]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: fmov s0, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #320]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s12, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v29.s[3], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #376]
-; CHECK-GI-NEXT: mov v0.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #344]
-; CHECK-GI-NEXT: ldr w8, [sp, #288]
-; CHECK-GI-NEXT: mov v12.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #392]
-; CHECK-GI-NEXT: mov v31.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #424]
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #432]
-; CHECK-GI-NEXT: fmov s13, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v12.s[2], w9
-; CHECK-GI-NEXT: fmov s8, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w10, [sp, #440]
-; CHECK-GI-NEXT: ldr w11, [sp, #384]
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
+; CHECK-GI-NEXT: fmov s18, wzr
; CHECK-GI-NEXT: fmov s17, wzr
-; CHECK-GI-NEXT: mov v13.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #408]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w12, [sp, #456]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: mov v12.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #488]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v13.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #496]
-; CHECK-GI-NEXT: mov v0.s[3], w9
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #416]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w9, [sp, #464]
-; CHECK-GI-NEXT: fmov s14, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #504]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #448]
-; CHECK-GI-NEXT: mul v27.4s, v25.4s, v0.4s
-; CHECK-GI-NEXT: mov v13.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #560]
-; CHECK-GI-NEXT: sxtb w15, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #568]
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #520]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #472]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s10, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #552]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s15, w13
-; CHECK-GI-NEXT: mov v8.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #480]
-; CHECK-GI-NEXT: mov v10.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #576]
-; CHECK-GI-NEXT: mov v9.s[2], w15
-; CHECK-GI-NEXT: mul w8, w8, w10
-; CHECK-GI-NEXT: mov v15.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #512]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w13, [sp, #536]
-; CHECK-GI-NEXT: mla v27.4s, v28.4s, v31.4s
-; CHECK-GI-NEXT: mul v30.4s, v30.4s, v13.4s
-; CHECK-GI-NEXT: mov v10.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #592]
-; CHECK-GI-NEXT: fmov s25, w8
-; CHECK-GI-NEXT: mov v14.s[3], w9
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #600]
-; CHECK-GI-NEXT: mla v30.4s, v24.4s, v12.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v10.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #632]
-; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #656]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s28, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #688]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v15.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #544]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v0.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #664]
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #696]
-; CHECK-GI-NEXT: fmov s11, w8
-; CHECK-GI-NEXT: fmov s31, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #672]
-; CHECK-GI-NEXT: ldr w8, [sp, #616]
-; CHECK-GI-NEXT: mov v11.s[1], w9
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #608]
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #704]
-; CHECK-GI-NEXT: ldr w9, [sp, #640]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v24.4s, v26.4s, v14.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #712]
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: mov v31.s[2], w10
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: mov v28.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w10, [sp, #720]
-; CHECK-GI-NEXT: ldr w9, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v26.4s, v29.4s, v15.4s
-; CHECK-GI-NEXT: mla v24.4s, v22.4s, v8.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v0.s[3], w8
-; CHECK-GI-NEXT: mov v31.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #784]
-; CHECK-GI-NEXT: mov v28.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: fmov s13, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #792]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v11.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #760]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w8, [sp, #728]
-; CHECK-GI-NEXT: fmov s14, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s12, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #824]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mla v26.4s, v23.4s, v9.4s
-; CHECK-GI-NEXT: ldr w13, [sp, #984]
-; CHECK-GI-NEXT: mov v14.s[1], w10
-; CHECK-GI-NEXT: sxtb w10, w12
-; CHECK-GI-NEXT: mov v13.s[1], w8
-; CHECK-GI-NEXT: mov v12.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #832]
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: ldr w12, [sp, #768]
-; CHECK-GI-NEXT: ldr w10, [sp, #800]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[3], w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: fmov s20, wzr
; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: lsl w11, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: fmov s7, wzr
; CHECK-GI-NEXT: fmov s2, wzr
-; CHECK-GI-NEXT: mov v29.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #840]
-; CHECK-GI-NEXT: mov v13.s[2], w8
-; CHECK-GI-NEXT: mov v12.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #808]
-; CHECK-GI-NEXT: mov v14.s[2], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w8, [sp, #776]
-; CHECK-GI-NEXT: ldr w10, [sp, #848]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s5, wzr
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v23.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #160]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #168]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v29.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #856]
-; CHECK-GI-NEXT: mov v13.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #864]
-; CHECK-GI-NEXT: mov v14.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #888]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s7, wzr
-; CHECK-GI-NEXT: fmov s15, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #920]
-; CHECK-GI-NEXT: mov v12.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #872]
-; CHECK-GI-NEXT: mov v29.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #896]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s22, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #928]
-; CHECK-GI-NEXT: mov v15.s[1], w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #952]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w11, [sp, #904]
-; CHECK-GI-NEXT: mov v22.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #936]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mov v15.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #960]
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #880]
-; CHECK-GI-NEXT: fmov s23, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w9, [sp, #944]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v22.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #912]
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #968]
-; CHECK-GI-NEXT: mov v23.s[1], w8
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: add v18.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: mov v22.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #992]
-; CHECK-GI-NEXT: fmov s9, w12
-; CHECK-GI-NEXT: mov v23.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #1048]
-; CHECK-GI-NEXT: ldr w12, [sp, #1056]
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v15.4s
-; CHECK-GI-NEXT: sxtb w13, w11
-; CHECK-GI-NEXT: mov v8.s[3], w9
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: ldr w9, [sp, #1000]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w10, [sp, #1016]
-; CHECK-GI-NEXT: ldr w8, [sp, #816]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s3, wzr
+; CHECK-GI-NEXT: mov v24.h[1], w12
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: mov v22.h[5], w11
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[5], w9
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w9, [sp, #192]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: mov v24.h[2], w8
+; CHECK-GI-NEXT: mov v22.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #208]
+; CHECK-GI-NEXT: mov v23.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #216]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #200]
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: mov v24.h[3], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[7], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #288]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v23.h[7], w10
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s27, w12
+; CHECK-GI-NEXT: ldr w10, [sp, #232]
+; CHECK-GI-NEXT: sbfx w16, w14, #8, #8
+; CHECK-GI-NEXT: mov v24.h[4], w15
+; CHECK-GI-NEXT: lsl w15, w11, #8
+; CHECK-GI-NEXT: sbfx w14, w18, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #296]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #344]
+; CHECK-GI-NEXT: mov v27.h[1], w13
+; CHECK-GI-NEXT: lsl w13, w17, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #240]
+; CHECK-GI-NEXT: sbfx w17, w10, #8, #8
+; CHECK-GI-NEXT: mov v25.h[1], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #352]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.h[5], w15
+; CHECK-GI-NEXT: mov v27.h[2], w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #304]
+; CHECK-GI-NEXT: fmov s26, w16
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #248]
+; CHECK-GI-NEXT: mov v25.h[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v24.h[6], w9
+; CHECK-GI-NEXT: lsl w16, w11, #8
+; CHECK-GI-NEXT: mov v26.h[1], w13
+; CHECK-GI-NEXT: mov v27.h[3], w17
+; CHECK-GI-NEXT: sbfx w13, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #312]
+; CHECK-GI-NEXT: ldr w17, [sp, #328]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #256]
+; CHECK-GI-NEXT: ldr w11, [sp, #264]
+; CHECK-GI-NEXT: mov v25.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #368]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v26.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #320]
+; CHECK-GI-NEXT: mov v27.h[4], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #408]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #376]
+; CHECK-GI-NEXT: mov v25.h[4], w9
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: mov v26.h[3], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #416]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w17, #8
+; CHECK-GI-NEXT: mov v27.h[5], w9
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v25.h[5], w14
+; CHECK-GI-NEXT: fmov s29, w15
+; CHECK-GI-NEXT: ldr w14, [sp, #384]
+; CHECK-GI-NEXT: ldr w15, [sp, #472]
+; CHECK-GI-NEXT: mov v26.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #424]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v29.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #480]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v25.h[6], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #432]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v26.h[5], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #392]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v29.h[2], w13
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #336]
+; CHECK-GI-NEXT: ldr w13, [sp, #488]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #440]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v28.h[1], w12
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v29.h[3], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #496]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #400]
+; CHECK-GI-NEXT: mov v26.h[6], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #448]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v28.h[2], w13
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v25.h[7], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #456]
+; CHECK-GI-NEXT: ldr w15, [sp, #504]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v28.h[3], w16
+; CHECK-GI-NEXT: ldr w15, [sp, #512]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mul v30.8h, v22.8h, v25.8h
+; CHECK-GI-NEXT: mov v26.h[7], w9
+; CHECK-GI-NEXT: mov v29.h[5], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #464]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[4], w9
+; CHECK-GI-NEXT: mov v27.h[6], w10
+; CHECK-GI-NEXT: ldr w16, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v30.h[0]
+; CHECK-GI-NEXT: mov v24.h[7], w8
+; CHECK-GI-NEXT: lsl w8, w17, #8
+; CHECK-GI-NEXT: mov v29.h[6], w11
+; CHECK-GI-NEXT: mul v26.8h, v23.8h, v26.8h
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: smov w16, v30.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #528]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[5], w10
+; CHECK-GI-NEXT: mov v27.h[7], w14
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: sbfx w10, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v30.h[4]
+; CHECK-GI-NEXT: mov v29.h[7], w8
+; CHECK-GI-NEXT: smov w15, v26.h[0]
+; CHECK-GI-NEXT: smov w13, v30.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #544]
+; CHECK-GI-NEXT: ldr w11, [sp, #552]
+; CHECK-GI-NEXT: mov v22.s[1], w16
+; CHECK-GI-NEXT: smov w16, v26.h[4]
+; CHECK-GI-NEXT: mov v28.h[6], w10
+; CHECK-GI-NEXT: smov w10, v26.h[1]
+; CHECK-GI-NEXT: fmov s23, w14
+; CHECK-GI-NEXT: smov w14, v26.h[5]
+; CHECK-GI-NEXT: mul v29.8h, v24.8h, v29.8h
+; CHECK-GI-NEXT: fmov s24, w15
+; CHECK-GI-NEXT: smov w15, v26.h[2]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: smov w8, v30.h[5]
+; CHECK-GI-NEXT: smov w17, v30.h[7]
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: mov v22.s[2], w13
+; CHECK-GI-NEXT: smov w13, v30.h[3]
+; CHECK-GI-NEXT: mov v24.s[1], w10
+; CHECK-GI-NEXT: smov w16, v26.h[6]
+; CHECK-GI-NEXT: sbfx w10, w12, #8, #8
+; CHECK-GI-NEXT: smov w18, v29.h[0]
+; CHECK-GI-NEXT: smov w0, v29.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v25.s[1], w14
+; CHECK-GI-NEXT: smov w14, v26.h[7]
+; CHECK-GI-NEXT: mov v28.h[7], w10
+; CHECK-GI-NEXT: mov v22.s[3], w13
+; CHECK-GI-NEXT: smov w13, v26.h[3]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: fmov s26, w18
+; CHECK-GI-NEXT: mov v23.s[1], w8
+; CHECK-GI-NEXT: smov w8, v30.h[6]
+; CHECK-GI-NEXT: mov v25.s[2], w16
+; CHECK-GI-NEXT: lsl w16, w9, #8
+; CHECK-GI-NEXT: mul v31.8h, v27.8h, v28.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #584]
+; CHECK-GI-NEXT: mov v24.s[3], w13
+; CHECK-GI-NEXT: smov w13, v29.h[4]
+; CHECK-GI-NEXT: mov v26.s[1], w0
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.s[2], w8
+; CHECK-GI-NEXT: mov v25.s[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #608]
+; CHECK-GI-NEXT: ldr w8, [sp, #576]
+; CHECK-GI-NEXT: fmov s8, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #616]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s27, w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v26.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[5]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #624]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v8.h[1], w11
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v23.s[3], w17
+; CHECK-GI-NEXT: fmov s9, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: smov w17, v31.h[1]
+; CHECK-GI-NEXT: mov v27.s[1], w15
+; CHECK-GI-NEXT: smov w15, v31.h[0]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v8.h[2], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v9.h[1], w16
+; CHECK-GI-NEXT: smov w16, v31.h[2]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #592]
+; CHECK-GI-NEXT: ldr w12, [sp, #600]
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: smov w15, v29.h[6]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v8.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #640]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v9.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #672]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v28.s[1], w17
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v27.s[2], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #680]
+; CHECK-GI-NEXT: mov v8.h[4], w8
+; CHECK-GI-NEXT: smov w8, v31.h[4]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v9.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #688]
+; CHECK-GI-NEXT: mov v28.s[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #648]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #656]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s30, w8
+; CHECK-GI-NEXT: sbfx w8, w10, #8, #8
+; CHECK-GI-NEXT: smov w10, v31.h[5]
+; CHECK-GI-NEXT: mov v8.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #696]
+; CHECK-GI-NEXT: mov v10.h[1], w15
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v9.h[4], w8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #704]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: ldr w17, [sp, #768]
+; CHECK-GI-NEXT: mov v30.s[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #744]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: mov v10.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v9.h[5], w16
+; CHECK-GI-NEXT: mov v8.h[6], w11
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #720]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v10.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #752]
+; CHECK-GI-NEXT: mov v8.h[7], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #760]
+; CHECK-GI-NEXT: ldr w8, [sp, #784]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mla v0.4s, v10.4s, v29.4s
-; CHECK-GI-NEXT: fmov s10, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #1024]
-; CHECK-GI-NEXT: mul v20.4s, v11.4s, v8.4s
-; CHECK-GI-NEXT: ldr q8, [sp] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v9.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #1008]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: mov v10.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1064]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v10.h[4], w12
+; CHECK-GI-NEXT: sbfx w12, w15, #8, #8
+; CHECK-GI-NEXT: mov v11.h[1], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #776]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v9.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w11, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #808]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v11.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #816]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], wzr
-; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v9.h[7], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #824]
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: mov v10.h[5], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #832]
+; CHECK-GI-NEXT: mov v11.h[3], w13
+; CHECK-GI-NEXT: sbfx w15, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w17, #8
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #872]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #840]
+; CHECK-GI-NEXT: sbfx w13, w18, #8, #8
+; CHECK-GI-NEXT: sbfx w17, w12, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #856]
+; CHECK-GI-NEXT: mov v12.h[1], w15
+; CHECK-GI-NEXT: mov v11.h[4], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #880]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v10.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #848]
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: sbfx w15, w16, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #888]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.s[1], wzr
+; CHECK-GI-NEXT: mov v12.h[2], w17
+; CHECK-GI-NEXT: lsl w17, w10, #8
+; CHECK-GI-NEXT: mov v11.h[5], w9
+; CHECK-GI-NEXT: fmov s13, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #936]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #864]
+; CHECK-GI-NEXT: mov v12.h[3], w15
+; CHECK-GI-NEXT: mov v11.h[6], w8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #1000]
+; CHECK-GI-NEXT: mov v13.h[1], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #944]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #896]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v12.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1008]
+; CHECK-GI-NEXT: fmov s14, w9
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v13.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #952]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w17, w8, #8
+; CHECK-GI-NEXT: smov w8, v29.h[3]
+; CHECK-GI-NEXT: smov w9, v29.h[7]
+; CHECK-GI-NEXT: fmov s29, w11
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v14.h[1], w15
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #904]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v12.h[5], w13
+; CHECK-GI-NEXT: mov v13.h[3], w14
+; CHECK-GI-NEXT: mov v29.h[1], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #960]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #1016]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #1024]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v14.h[2], w16
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #912]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #968]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #976]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v14.h[3], w15
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v29.h[2], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #1032]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #920]
+; CHECK-GI-NEXT: mov v26.s[3], w8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v14.h[4], w11
+; CHECK-GI-NEXT: mov v29.h[3], w13
+; CHECK-GI-NEXT: ldr w11, [sp, #984]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v13.h[5], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #1040]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #928]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[7], w10
+; CHECK-GI-NEXT: mov v27.s[3], w9
+; CHECK-GI-NEXT: mov v14.h[5], w12
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[6], w14
+; CHECK-GI-NEXT: ldr w12, [sp, #1048]
+; CHECK-GI-NEXT: sbfx w14, w16, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #728]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mul v15.8h, v8.8h, v12.8h
+; CHECK-GI-NEXT: smov w16, v31.h[6]
+; CHECK-GI-NEXT: mov v14.h[6], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #992]
+; CHECK-GI-NEXT: mov v29.h[5], w14
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v13.h[7], w13
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #792]
+; CHECK-GI-NEXT: ldr w14, [sp, #1056]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v30.s[2], w16
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: smov w8, v15.h[1]
+; CHECK-GI-NEXT: smov w9, v15.h[5]
+; CHECK-GI-NEXT: mov v29.h[6], w12
+; CHECK-GI-NEXT: lsl w12, w13, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v10.h[7], w11
+; CHECK-GI-NEXT: mov v14.h[7], w10
+; CHECK-GI-NEXT: mul v12.8h, v9.8h, v13.8h
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w10, v15.h[0]
+; CHECK-GI-NEXT: smov w11, v15.h[4]
+; CHECK-GI-NEXT: smov w14, v31.h[7]
+; CHECK-GI-NEXT: smov w15, v31.h[3]
+; CHECK-GI-NEXT: mov v11.h[7], w12
+; CHECK-GI-NEXT: mov v29.h[7], w13
; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mul v13.8h, v10.8h, v14.8h
+; CHECK-GI-NEXT: smov w12, v12.h[0]
+; CHECK-GI-NEXT: smov w13, v12.h[1]
+; CHECK-GI-NEXT: mov v7.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1080]
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w10, [sp, #1032]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: mov v29.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #1072]
-; CHECK-GI-NEXT: mov v19.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: fmov s31, w11
+; CHECK-GI-NEXT: mov v30.s[3], w14
+; CHECK-GI-NEXT: smov w11, v12.h[4]
+; CHECK-GI-NEXT: mul v14.8h, v11.8h, v29.8h
+; CHECK-GI-NEXT: fmov s29, w10
+; CHECK-GI-NEXT: smov w10, v15.h[2]
+; CHECK-GI-NEXT: smov w14, v13.h[0]
+; CHECK-GI-NEXT: fmov s8, w12
+; CHECK-GI-NEXT: smov w16, v13.h[1]
+; CHECK-GI-NEXT: mov v31.s[1], w9
+; CHECK-GI-NEXT: smov w9, v12.h[2]
+; CHECK-GI-NEXT: mov v28.s[3], w15
+; CHECK-GI-NEXT: mov v29.s[1], w8
+; CHECK-GI-NEXT: smov w8, v15.h[6]
+; CHECK-GI-NEXT: smov w15, v12.h[5]
+; CHECK-GI-NEXT: mov v8.s[1], w13
+; CHECK-GI-NEXT: fmov s9, w11
+; CHECK-GI-NEXT: smov w12, v15.h[3]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: smov w14, v13.h[2]
+; CHECK-GI-NEXT: smov w11, v12.h[6]
+; CHECK-GI-NEXT: smov w13, v15.h[7]
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v31.s[2], w8
+; CHECK-GI-NEXT: smov w8, v13.h[4]
+; CHECK-GI-NEXT: mov v29.s[2], w10
+; CHECK-GI-NEXT: mov v10.s[1], w16
+; CHECK-GI-NEXT: smov w16, v14.h[0]
+; CHECK-GI-NEXT: mov v8.s[2], w9
+; CHECK-GI-NEXT: smov w9, v13.h[5]
+; CHECK-GI-NEXT: smov w10, v12.h[3]
+; CHECK-GI-NEXT: mov v9.s[1], w15
+; CHECK-GI-NEXT: smov w15, v13.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mul w8, w8, w9
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: fmov s11, w8
+; CHECK-GI-NEXT: smov w8, v14.h[1]
+; CHECK-GI-NEXT: mov v29.s[3], w12
+; CHECK-GI-NEXT: mov v10.s[2], w14
+; CHECK-GI-NEXT: smov w14, v12.h[7]
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: smov w12, v14.h[4]
+; CHECK-GI-NEXT: mov v8.s[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #536]
+; CHECK-GI-NEXT: mov v11.s[1], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v9.s[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #800]
+; CHECK-GI-NEXT: mov v12.s[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1064]
+; CHECK-GI-NEXT: mov v31.s[3], w13
+; CHECK-GI-NEXT: smov w13, v14.h[5]
+; CHECK-GI-NEXT: sxtb w9, w9
; CHECK-GI-NEXT: sxtb w10, w10
; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v11.s[2], w15
+; CHECK-GI-NEXT: smov w15, v13.h[3]
+; CHECK-GI-NEXT: smov w16, v13.h[7]
+; CHECK-GI-NEXT: fmov s13, w12
+; CHECK-GI-NEXT: mul w9, w9, w10
+; CHECK-GI-NEXT: smov w12, v14.h[2]
+; CHECK-GI-NEXT: mul w8, w11, w8
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: mov v21.s[2], wzr
; CHECK-GI-NEXT: mov v16.s[2], wzr
+; CHECK-GI-NEXT: mov v18.s[2], wzr
; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v13.s[1], w13
+; CHECK-GI-NEXT: smov w13, v14.h[6]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: sxth w10, w8
+; CHECK-GI-NEXT: mov v20.s[2], wzr
; CHECK-GI-NEXT: mov v6.s[2], wzr
+; CHECK-GI-NEXT: mov v7.s[2], wzr
; CHECK-GI-NEXT: mov v2.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], wzr
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: mov v10.s[3], w11
-; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
-; CHECK-GI-NEXT: ldr w9, [sp, #976]
-; CHECK-GI-NEXT: fmov s21, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #1040]
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: add v22.4s, v22.4s, v23.4s
+; CHECK-GI-NEXT: add v25.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: fmov s23, w9
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v12.s[2], w12
+; CHECK-GI-NEXT: mov v13.s[2], w13
+; CHECK-GI-NEXT: smov w8, v14.h[3]
+; CHECK-GI-NEXT: smov w9, v14.h[7]
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v0.s[2], wzr
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[3], wzr
; CHECK-GI-NEXT: mov v16.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[3], wzr
; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.16b, v8.16b
-; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v20.s[3], wzr
; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v7.s[3], wzr
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], wzr
-; CHECK-GI-NEXT: mov v25.s[1], wzr
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mul v8.4s, v13.4s, v9.4s
-; CHECK-GI-NEXT: mul v9.4s, v14.4s, v10.4s
-; CHECK-GI-NEXT: mov v23.s[3], w9
-; CHECK-GI-NEXT: mov v29.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: mov v23.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], wzr
+; CHECK-GI-NEXT: mov v9.s[3], w14
+; CHECK-GI-NEXT: mov v10.s[3], w15
+; CHECK-GI-NEXT: mov v11.s[3], w16
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: add v16.4s, v16.4s, v17.4s
-; CHECK-GI-NEXT: add v3.4s, v3.4s, v6.4s
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
-; CHECK-GI-NEXT: add v4.4s, v4.4s, v7.4s
-; CHECK-GI-NEXT: mov v25.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mla v20.4s, v28.4s, v22.4s
-; CHECK-GI-NEXT: mla v8.4s, v31.4s, v23.4s
-; CHECK-GI-NEXT: mla v9.4s, v12.4s, v29.4s
-; CHECK-GI-NEXT: add v5.4s, v19.4s, v16.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v18.4s
-; CHECK-GI-NEXT: add v3.4s, v11.4s, v3.4s
+; CHECK-GI-NEXT: mov v12.s[3], w8
+; CHECK-GI-NEXT: mov v13.s[3], w9
+; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
+; CHECK-GI-NEXT: add v16.4s, v16.4s, v18.4s
+; CHECK-GI-NEXT: add v17.4s, v17.4s, v20.4s
+; CHECK-GI-NEXT: add v6.4s, v6.4s, v7.4s
; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v4.4s, v27.4s, v30.4s
-; CHECK-GI-NEXT: add v6.4s, v24.4s, v26.4s
-; CHECK-GI-NEXT: ldr x29, [sp, #80] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v25.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v20.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v5.4s
-; CHECK-GI-NEXT: add v5.4s, v8.4s, v9.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v2.4s
-; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v25.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: add v2.4s, v21.4s, v2.4s
-; CHECK-GI-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v3.4s, v5.4s
+; CHECK-GI-NEXT: mov v23.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], wzr
+; CHECK-GI-NEXT: add v26.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v27.4s, v28.4s, v30.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v19.4s
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
+; CHECK-GI-NEXT: add v5.4s, v29.4s, v31.4s
+; CHECK-GI-NEXT: add v7.4s, v8.4s, v9.4s
+; CHECK-GI-NEXT: add v16.4s, v10.4s, v11.4s
+; CHECK-GI-NEXT: add v17.4s, v12.4s, v13.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v6.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v23.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], wzr
+; CHECK-GI-NEXT: add v3.4s, v22.4s, v25.4s
+; CHECK-GI-NEXT: add v6.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-GI-NEXT: add v4.4s, v5.4s, v7.4s
+; CHECK-GI-NEXT: add v5.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v2.4s, v3.4s, v6.4s
+; CHECK-GI-NEXT: add v1.4s, v23.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v0.4s, v24.4s, v0.4s
+; CHECK-GI-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: add sp, sp, #96
+; CHECK-GI-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <33 x i8> %a to <33 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-extmul.ll b/llvm/test/CodeGen/AArch64/neon-extmul.ll
index c82f8e1..84b634d 100644
--- a/llvm/test/CodeGen/AArch64/neon-extmul.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extmul.ll
@@ -12,10 +12,9 @@ define <8 x i32> @extmuls_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: smull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -34,10 +33,9 @@ define <8 x i32> @extmulu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -79,12 +77,9 @@ define <8 x i32> @extmuladds_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: smlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: saddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: saddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -104,12 +99,9 @@ define <8 x i32> @extmuladdu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -163,16 +155,13 @@ define <8 x i64> @extmuls_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: smull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: smull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: smull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: smull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: sshll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: sshll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: sshll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: sshll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -195,16 +184,13 @@ define <8 x i64> @extmulu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: umull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
@@ -263,20 +249,13 @@ define <8 x i64> @extmuladds_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: smlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: smlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: smlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: smlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: saddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: saddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: saddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: saddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -301,20 +280,13 @@ define <8 x i64> @extmuladdu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index 17ad298..3caac1d 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -1,40 +1,72 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
%struct.anon = type { ptr, ptr }
@ptr_wrapper = common global ptr null, align 8
define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
-; CHECK-LABEL: test_func_i32_two_uses:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB0_3
-; CHECK-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: lsl w1, w1, #1
-; CHECK-NEXT: cbz w1, .LBB0_6
-; CHECK-NEXT: .LBB0_3: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands w10, w1, w0
-; CHECK-NEXT: and w11, w2, w0
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cmp w10, w11
-; CHECK-NEXT: b.eq .LBB0_1
-; CHECK-NEXT: // %bb.4: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbnz w2, .LBB0_1
-; CHECK-NEXT: // %bb.5: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbz w10, .LBB0_2
-; CHECK-NEXT: b .LBB0_1
-; CHECK-NEXT: .LBB0_6: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i32_two_uses:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB0_3
+; CHECK-SD-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: lsl w1, w1, #1
+; CHECK-SD-NEXT: cbz w1, .LBB0_6
+; CHECK-SD-NEXT: .LBB0_3: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands w10, w1, w0
+; CHECK-SD-NEXT: and w11, w2, w0
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cmp w10, w11
+; CHECK-SD-NEXT: b.eq .LBB0_1
+; CHECK-SD-NEXT: // %bb.4: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbnz w2, .LBB0_1
+; CHECK-SD-NEXT: // %bb.5: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbz w10, .LBB0_2
+; CHECK-SD-NEXT: b .LBB0_1
+; CHECK-SD-NEXT: .LBB0_6: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i32_two_uses:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB0_3
+; CHECK-GI-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: lsl w1, w1, #1
+; CHECK-GI-NEXT: cbz w1, .LBB0_6
+; CHECK-GI-NEXT: .LBB0_3: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and w10, w1, w0
+; CHECK-GI-NEXT: tst w1, w0
+; CHECK-GI-NEXT: and w11, w2, w0
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cmp w10, w11
+; CHECK-GI-NEXT: b.eq .LBB0_1
+; CHECK-GI-NEXT: // %bb.4: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbnz w2, .LBB0_1
+; CHECK-GI-NEXT: // %bb.5: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbz w10, .LBB0_2
+; CHECK-GI-NEXT: b .LBB0_1
+; CHECK-GI-NEXT: .LBB0_6: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -70,28 +102,52 @@ do.end: ; preds = %4
}
define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) {
-; CHECK-LABEL: test_func_i64_one_use:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: lsl x1, x1, #1
-; CHECK-NEXT: cbz x1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands x10, x1, x0
-; CHECK-NEXT: orr x10, x2, x10
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cbz x10, .LBB1_1
-; CHECK-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: b .LBB1_1
-; CHECK-NEXT: .LBB1_4: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i64_one_use:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB1_2
+; CHECK-SD-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: lsl x1, x1, #1
+; CHECK-SD-NEXT: cbz x1, .LBB1_4
+; CHECK-SD-NEXT: .LBB1_2: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands x10, x1, x0
+; CHECK-SD-NEXT: orr x10, x2, x10
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cbz x10, .LBB1_1
+; CHECK-SD-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: b .LBB1_1
+; CHECK-SD-NEXT: .LBB1_4: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i64_one_use:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB1_2
+; CHECK-GI-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: lsl x1, x1, #1
+; CHECK-GI-NEXT: cbz x1, .LBB1_4
+; CHECK-GI-NEXT: .LBB1_2: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and x10, x1, x0
+; CHECK-GI-NEXT: tst x1, x0
+; CHECK-GI-NEXT: orr x10, x2, x10
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cbz x10, .LBB1_1
+; CHECK-GI-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: b .LBB1_1
+; CHECK-GI-NEXT: .LBB1_4: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -124,11 +180,18 @@ do.end: ; preds = %4
}
define i64 @test_and1(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ands x8, x0, #0x3
-; CHECK-NEXT: csel x0, x8, x1, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ands x8, x0, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and x8, x0, #0x3
+; CHECK-GI-NEXT: tst x0, #0x3
+; CHECK-GI-NEXT: csel x0, x8, x1, eq
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%c = icmp eq i64 %a, 0
%s = select i1 %c, i64 %a, i64 %y
@@ -148,23 +211,43 @@ define i64 @test_and2(i64 %x, i64 %y) {
}
define i64 @test_and3(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and3:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: mov x0, xzr
-; CHECK-NEXT: mov x19, x1
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x20, #0x3
-; CHECK-NEXT: csel x0, x8, x19, eq
-; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: mov x20, x0
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: mov x19, x1
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x20, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x19, eq
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: and x21, x0, #0x3
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: mov x20, x1
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x21, x20, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 0)
%c = icmp eq i64 %a, 0
@@ -173,19 +256,37 @@ define i64 @test_and3(i64 %x, i64 %y) {
}
define i64 @test_and_4(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and_4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: ands x0, x0, #0x3
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x19, #0x3
-; CHECK-NEXT: csel x0, x8, x0, eq
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and_4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: mov x19, x0
+; CHECK-SD-NEXT: ands x0, x0, #0x3
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x19, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x0, eq
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and_4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: and x20, x0, #0x3
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x0, x20
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x20, x0, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 %a)
%c = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
index 2a77d4d..4206c0bc 100644
--- a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
+++ b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
@@ -27,11 +27,12 @@ define i32 @caller() nounwind ssp {
; CHECK-NEXT: sub sp, sp, #208
; CHECK-NEXT: mov w8, #10 ; =0xa
; CHECK-NEXT: mov w9, #9 ; =0x9
-; CHECK-NEXT: mov w10, #8 ; =0x8
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: stp x9, x8, [sp, #24]
-; CHECK-NEXT: mov w8, #7 ; =0x7
+; CHECK-NEXT: mov w8, #8 ; =0x8
; CHECK-NEXT: mov w9, #6 ; =0x6
-; CHECK-NEXT: mov w0, #1 ; =0x1
+; CHECK-NEXT: str x8, [sp, #16]
+; CHECK-NEXT: mov w8, #7 ; =0x7
; CHECK-NEXT: mov w1, #2 ; =0x2
; CHECK-NEXT: mov w2, #3 ; =0x3
; CHECK-NEXT: mov w3, #4 ; =0x4
@@ -46,8 +47,7 @@ define i32 @caller() nounwind ssp {
; CHECK-NEXT: stp x22, x21, [sp, #160] ; 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #176] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #192] ; 16-byte Folded Spill
-; CHECK-NEXT: stp x8, x10, [sp, #8]
-; CHECK-NEXT: str x9, [sp]
+; CHECK-NEXT: stp x9, x8, [sp]
; CHECK-NEXT: bl _callee
; CHECK-NEXT: ldp x29, x30, [sp, #192] ; 16-byte Folded Reload
; CHECK-NEXT: ldp x20, x19, [sp, #176] ; 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir
new file mode 100644
index 0000000..aecb90a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir
@@ -0,0 +1,23 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=false -o - %s | FileCheck %s
+# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=true -o - %s | FileCheck %s
+---
+name: test
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x0 = COPY $x1
+ ; CHECK-NEXT: renamable $w1 = ORRWrr $wzr, renamable $w0, implicit-def renamable $x1
+ ; CHECK-NEXT: RET_ReallyLR implicit $x1, implicit $x0
+ %190:gpr64 = COPY killed $x1
+ %191:gpr32 = COPY %190.sub_32:gpr64
+ %192:gpr32 = ORRWrr $wzr, killed %191:gpr32
+ %193:gpr64all = SUBREG_TO_REG 0, killed %192:gpr32, %subreg.sub_32
+ $x0 = COPY killed %190:gpr64
+ $x1 = COPY killed %193:gpr64all
+ RET_ReallyLR implicit $x1, implicit $x0
+...
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
index 08fc47d..eb6242c 100644
--- a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
+++ b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
@@ -7,9 +7,18 @@
# CHECK-DBG: ********** JOINING INTERVALS ***********
# CHECK-DBG: ********** INTERVALS **********
# CHECK-DBG: %0 [16r,32r:0) 0@16r weight:0.000000e+00
-# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00
-# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [112e,112d:0) 0@112e L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00
+# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000080 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00
+# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00
# CHECK-DBG: %5 [32r,112r:1)[112r,112d:0) 0@112r 1@32r weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0.entry:
+# CHECK-DBG: 16B %0:gpr64sp = ADDXri %stack.0, 0, 0
+# CHECK-DBG: 32B %5:gpr64common = nuw ADDXri %0:gpr64sp, 64, 0
+# CHECK-DBG: 48B undef %3.sub_32:gpr64 = MOVi32imm 64, implicit-def %3:gpr64
+# CHECK-DBG: 80B undef %4.sub_32:gpr64 = MOVi32imm 64, implicit-def %4:gpr64
+# CHECK-DBG: 112B dead %5:gpr64common, dead early-clobber %4:gpr64 = MOPSMemorySetPseudo %5:gpr64common(tied-def 0), %4:gpr64(tied-def 1), %3:gpr64, implicit-def dead $nzcv
+# CHECK-DBG: 128B RET_ReallyLR
+
---
name: test
tracksRegLiveness: true
@@ -43,9 +52,44 @@ body: |
# CHECK-DBG: %1 [32r,48B:2)[48B,320r:0)[320r,368B:1) 0@48B-phi 1@320r 2@32r
# CHECK-DBG-SAME: weight:0.000000e+00
# CHECK-DBG: %3 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
-# CHECK-DBG-SAME: L0000000000000080 [288r,304B:0)[304B,320r:3) 0@288r 1@x 2@x 3@304B-phi
+# CHECK-DBG-SAME: L0000000000000080 [240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@x 3@304B-phi
# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 48B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0, %bb.7
+# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%)
+# CHECK-DBG: 64B bb.2:
+# CHECK-DBG: ; predecessors: %bb.1
+# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%)
+# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 96B bb.3:
+# CHECK-DBG: ; predecessors: %bb.2
+# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%)
+# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv
+# CHECK-DBG: 160B bb.4:
+# CHECK-DBG: ; predecessors: %bb.3
+# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%)
+# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv
+# CHECK-DBG: 208B bb.5:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 240B undef %3.sub_32:gpr64 = MOVi32imm 1, implicit-def %3:gpr64
+# CHECK-DBG: 256B B %bb.7
+# CHECK-DBG: 272B bb.6:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 288B %3:gpr64 = COPY $xzr
+# CHECK-DBG: 304B bb.7:
+# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 320B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1
+# CHECK-DBG: 352B B %bb.1
---
name: reproducer
tracksRegLiveness: true
@@ -92,6 +136,42 @@ body: |
# CHECK-DBG-SAME: L0000000000000080 [224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@x 3@288B-phi
# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi
# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 48B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0, %bb.7
+# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%)
+# CHECK-DBG: 64B bb.2:
+# CHECK-DBG: ; predecessors: %bb.1
+# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%)
+# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 96B bb.3:
+# CHECK-DBG: ; predecessors: %bb.2
+# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%)
+# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv
+# CHECK-DBG: 160B bb.4:
+# CHECK-DBG: ; predecessors: %bb.3
+# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%)
+# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv
+# CHECK-DBG: 208B bb.5:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 224B %3:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 240B B %bb.7
+# CHECK-DBG: 256B bb.6:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 272B %3:gpr64 = COPY $xzr
+# CHECK-DBG: 288B bb.7:
+# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 304B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1
+# CHECK-DBG: 336B B %bb.1
+
---
name: reproducer2
tracksRegLiveness: true
@@ -127,3 +207,78 @@ body: |
B %bb.1
...
+# CHECK-DBG: ********** REGISTER COALESCER **********
+# CHECK-DBG: ********** Function: reproducer3
+# CHECK-DBG: ********** JOINING INTERVALS ***********
+# CHECK-DBG: ********** INTERVALS **********
+# CHECK-DBG: W0 [0B,32r:0)[320r,336r:1) 0@0B-phi 1@320r
+# CHECK-DBG: W1 [0B,16r:0) 0@0B-phi
+# CHECK-DBG: %0 [16r,64r:0) 0@16r weight:0.000000e+00
+# CHECK-DBG: %1 [32r,128r:0) 0@32r weight:0.000000e+00
+# CHECK-DBG: %2 [48r,64r:0) 0@48r weight:0.000000e+00
+# CHECK-DBG: %3 [64r,80r:0) 0@64r weight:0.000000e+00
+# CHECK-DBG: %4 [80r,176r:0) 0@80r weight:0.000000e+00
+# CHECK-DBG: %7 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r
+# CHECK-DBG-SAME: L0000000000000080 [128r,256r:0)[304B,320r:0) 0@128r
+# CHECK-DBG-SAME: L0000000000000040 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r
+# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: %8 [96r,176r:1)[176r,192r:0) 0@176r 1@96r weight:0.000000e+00
+# CHECK-DBG: %9 [256r,272r:0) 0@256r weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.2(0x40000000), %bb.1(0x40000000); %bb.2(50.00%), %bb.1(50.00%)
+# CHECK-DBG: liveins: $w0, $w1
+# CHECK-DBG: 16B %0:gpr32 = COPY $w1
+# CHECK-DBG: 32B %1:gpr32 = COPY $w0
+# CHECK-DBG: 48B %2:gpr32 = UBFMWri %1:gpr32, 31, 30
+# CHECK-DBG: 64B %3:gpr32 = SUBWrs %2:gpr32, %0:gpr32, 1
+# CHECK-DBG: 80B %4:gpr32 = UBFMWri %3:gpr32, 1, 31
+# CHECK-DBG: 96B %8:gpr32common = MOVi32imm 1
+# CHECK-DBG: 112B undef %7.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 128B undef %7.sub_32:gpr64 = BFMWri %7.sub_32:gpr64(tied-def 0), %1:gpr32, 31, 30, implicit-def %7:gpr64
+# CHECK-DBG: 176B %8:gpr32common = BFMWri %8:gpr32common(tied-def 0), %4:gpr32, 30, 29
+# CHECK-DBG: 192B dead $wzr = SUBSWri %8:gpr32common, 0, 0, implicit-def $nzcv
+# CHECK-DBG: 208B Bcc 2, %bb.2, implicit killed $nzcv
+# CHECK-DBG: 224B B %bb.1
+# CHECK-DBG: 240B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0
+# CHECK-DBG: 256B %9:gpr64common = UBFMXri %7:gpr64, 62, 61
+# CHECK-DBG: 272B dead $xzr = LDRXui %9:gpr64common, 0
+# CHECK-DBG: 288B RET_ReallyLR
+# CHECK-DBG: 304B bb.2:
+# CHECK-DBG: ; predecessors: %bb.0
+# CHECK-DBG: 320B $x0 = COPY %7:gpr64
+# CHECK-DBG: 336B RET_ReallyLR implicit $x0
+
+---
+name: reproducer3
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ %0:gpr32 = COPY killed $w1
+ %1:gpr32 = COPY killed $w0
+ %3:gpr32 = UBFMWri %1, 31, 30
+ %4:gpr32 = SUBWrs killed %3, killed %0, 1
+ %5:gpr32 = UBFMWri killed %4, 1, 31
+ %6:gpr32 = MOVi32imm 1
+ %7:gpr32 = COPY %6
+ %7:gpr32 = BFMWri %7, killed %1, 31, 30
+ %8:gpr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32
+ %9:gpr32common = COPY killed %6
+ %9:gpr32common = BFMWri %9, killed %5, 30, 29
+ dead $wzr = SUBSWri killed %9, 0, 0, implicit-def $nzcv
+ Bcc 2, %bb.2, implicit killed $nzcv
+ B %bb.1
+
+ bb.1:
+ %10:gpr64common = UBFMXri killed %8, 62, 61
+ dead $xzr = LDRXui killed %10, 0
+ RET_ReallyLR
+
+ bb.2:
+ $x0 = COPY killed %8
+ RET_ReallyLR implicit killed $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/selectopt-const.ll b/llvm/test/CodeGen/AArch64/selectopt-const.ll
index a44c746..fe48dba 100644
--- a/llvm/test/CodeGen/AArch64/selectopt-const.ll
+++ b/llvm/test/CodeGen/AArch64/selectopt-const.ll
@@ -29,8 +29,8 @@ define i32 @test_const(ptr %in1, ptr %in2, ptr %out, i32 %n, ptr %tbl) {
; CHECK-NEXT: csel x10, x9, xzr, lt
; CHECK-NEXT: subs x8, x8, #1
; CHECK-NEXT: ldr s3, [x4, x10]
-; CHECK-NEXT: fcvtzs w10, s3
-; CHECK-NEXT: str w10, [x2], #4
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: st1 { v3.s }[0], [x2], #4
; CHECK-NEXT: b.ne .LBB0_2
; CHECK-NEXT: .LBB0_3: // %for.cond.cleanup
; CHECK-NEXT: mov w0, wzr
diff --git a/llvm/test/CodeGen/AArch64/store-float-conversion.ll b/llvm/test/CodeGen/AArch64/store-float-conversion.ll
new file mode 100644
index 0000000..c46801f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/store-float-conversion.ll
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=aarch64 < %s | FileCheck %s
+
+define void @f32_to_u8(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str b0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_s8(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str b0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_u16(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ %trunc = trunc i32 %conv to i16
+ store i16 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_s16(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i16
+ store i16 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_u32(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str s0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ store i32 %conv, ptr %dst
+ ret void
+}
+
+define void @f32_to_s32(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str s0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ store i32 %conv, ptr %dst
+ ret void
+}
+
+define void @f32_to_s64(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs w8, s0
+; CHECK-NEXT: sxtw x8, w8
+; CHECK-NEXT: str x8, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %ext = sext i32 %conv to i64
+ store i64 %ext, ptr %dst
+ ret void
+}
+
+define void @f64_to_u64(double %d, ptr %dst) {
+; CHECK-LABEL: f64_to_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu d0, d0
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui double %d to i64
+ store i64 %conv, ptr %dst
+ ret void
+}
+
+define void @f64_to_s64(double %d, ptr %dst) {
+; CHECK-LABEL: f64_to_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs d0, d0
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi double %d to i64
+ store i64 %conv, ptr %dst
+ ret void
+}
+
+define i32 @f32_to_i32_multiple_uses(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_i32_multiple_uses:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs w8, s0
+; CHECK-NEXT: mov x9, x0
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: strb w8, [x9]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 52cb2d9..c7fb2db 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -267,7 +267,7 @@ define <vscale x 32 x i16> @interleave4_nxv8i16(<vscale x 8 x i16> %vec0, <vscal
; SME2-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; SME2-NEXT: zip { z0.h - z3.h }, { z0.h - z3.h }
; SME2-NEXT: ret
- %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
+ %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv32i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
ret <vscale x 32 x i16> %retval
}
@@ -540,30 +540,81 @@ define <vscale x 4 x i32> @interleave2_nxv2i32(<vscale x 2 x i32> %vec0, <vscale
ret <vscale x 4 x i32> %retval
}
-; Float declarations
-declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; SVE-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #4 // =0x4
+; SVE-NEXT: mov z1.d, #3 // =0x3
+; SVE-NEXT: zip2 z2.d, z1.d, z0.d
+; SVE-NEXT: zip1 z0.d, z1.d, z0.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: mov z0.d, #4 // =0x4
+; SME2-NEXT: mov z1.d, #3 // =0x3
+; SME2-NEXT: zip { z0.d, z1.d }, z1.d, z0.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
-; Integer declarations
-declare <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, w0
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
-; Predicated
-declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
-declare <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
-declare <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
-declare <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-
-; Illegal type size
-declare <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-
-declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; SVE-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: // kill: def $w1 killed $w1 def $x1
+; SVE-NEXT: // kill: def $w0 killed $w0 def $x0
+; SVE-NEXT: mov z0.d, x0
+; SVE-NEXT: mov z1.d, x1
+; SVE-NEXT: zip2 z2.d, z0.d, z1.d
+; SVE-NEXT: zip1 z0.d, z0.d, z1.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: // kill: def $w1 killed $w1 def $x1
+; SME2-NEXT: // kill: def $w0 killed $w0 def $x0
+; SME2-NEXT: mov z0.d, x0
+; SME2-NEXT: mov z1.d, x1
+; SME2-NEXT: zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
index 9306c20..7dcd56c 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
@@ -1,14 +1,14 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s |FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve < %s | FileCheck %s
-declare i32 @llvm.vscale.i32()
-declare i64 @llvm.vscale.i64()
+target triple = "aarch64-unknown-linux-gnu"
; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
define i64 @combine_add_vscale_i64() nounwind {
; CHECK-LABEL: combine_add_vscale_i64:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%add = add i64 %vscale, %vscale
ret i64 %add
@@ -16,9 +16,10 @@ define i64 @combine_add_vscale_i64() nounwind {
define i32 @combine_add_vscale_i32() nounwind {
; CHECK-LABEL: combine_add_vscale_i32:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%add = add i32 %vscale, %vscale
ret i32 %add
@@ -28,9 +29,9 @@ define i32 @combine_add_vscale_i32() nounwind {
; In this test, C0 = 1, C1 = 32.
define i64 @combine_mul_vscale_i64() nounwind {
; CHECK-LABEL: combine_mul_vscale_i64:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #2
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #2
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mul = mul i64 %vscale, 32
ret i64 %mul
@@ -38,9 +39,10 @@ define i64 @combine_mul_vscale_i64() nounwind {
define i32 @combine_mul_vscale_i32() nounwind {
; CHECK-LABEL: combine_mul_vscale_i32:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #3
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #3
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%mul = mul i32 %vscale, 48
ret i32 %mul
@@ -49,11 +51,11 @@ define i32 @combine_mul_vscale_i32() nounwind {
; Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i64:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add x0, x0, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add x0, x0, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %in, %vscale
ret i64 %sub
@@ -61,11 +63,11 @@ define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i32:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add w0, w0, w8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add w0, w0, w8
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%sub = sub i32 %in, %vscale
ret i32 %sub
@@ -75,12 +77,13 @@ define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: multiple_uses_sub_vscale_i64:
-; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: lsr x8, x8, #4
-; CHECK-NEXT: sub x9, x0, x8
-; CHECK-NEXT: add x8, x1, x8
-; CHECK-NEXT: mul x0, x9, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: sub x9, x0, x8
+; CHECK-NEXT: add x8, x1, x8
+; CHECK-NEXT: mul x0, x9, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %x, %vscale
%add = add i64 %y, %vscale
@@ -95,9 +98,9 @@ define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; Hence, the immediate for RDVL is #1.
define i64 @combine_shl_vscale_i64() nounwind {
; CHECK-LABEL: combine_shl_vscale_i64:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%shl = shl i64 %vscale, 4
ret i64 %shl
@@ -105,10 +108,38 @@ define i64 @combine_shl_vscale_i64() nounwind {
define i32 @combine_shl_vscale_i32() nounwind {
; CHECK-LABEL: combine_shl_vscale_i32:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%shl = shl i32 %vscale, 4
ret i32 %shl
}
+
+define i64 @combine_shl_mul_vscale(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %a, %vscale
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+define i64 @combine_shl_mul_vscale_commuted(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale_commuted:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %vscale, %a
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+declare i32 @llvm.vscale.i32()
+declare i64 @llvm.vscale.i64()
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index aa0a163..5fc996a 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -63,7 +63,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s2, #0.0
; CHECK-NEXT: fcsel s2, s0, s3, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
+; CHECK-NEXT: fmov w11, s2
; CHECK-NEXT: strb w11, [x9], #1
; CHECK-NEXT: b.ne .LBB0_7
; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup
@@ -178,12 +179,12 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s3, s1
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: fcsel s3, s0, s4, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w11, [x9]
-; CHECK-NEXT: fcvtzs w12, s3
-; CHECK-NEXT: strb w12, [x9, #1]
+; CHECK-NEXT: str b2, [x9]
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: stur b3, [x9, #1]
; CHECK-NEXT: add x9, x9, #2
; CHECK-NEXT: b.ne .LBB1_6
; CHECK-NEXT: .LBB1_7: // %for.cond.cleanup
@@ -395,19 +396,19 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
; CHECK-NEXT: ldr s3, [x8, #8]
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: add x8, x8, #12
; CHECK-NEXT: fcsel s4, s0, s4, mi
; CHECK-NEXT: fcmp s3, s1
-; CHECK-NEXT: strb w11, [x9]
+; CHECK-NEXT: str b2, [x9]
; CHECK-NEXT: fcsel s5, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w12, s4
+; CHECK-NEXT: fcvtzs s4, s4
; CHECK-NEXT: fcsel s3, s0, s5, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w12, [x9, #1]
-; CHECK-NEXT: fcvtzs w13, s3
-; CHECK-NEXT: strb w13, [x9, #2]
+; CHECK-NEXT: stur b4, [x9, #1]
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: stur b3, [x9, #2]
; CHECK-NEXT: add x9, x9, #3
; CHECK-NEXT: b.ne .LBB2_8
; CHECK-NEXT: .LBB2_9: // %for.cond.cleanup
@@ -563,26 +564,26 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s3, s1
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: ldp s3, s5, [x8, #8]
; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: fcsel s4, s0, s4, mi
; CHECK-NEXT: fcmp s3, s1
-; CHECK-NEXT: strb w11, [x9]
-; CHECK-NEXT: fcvtzs w12, s4
+; CHECK-NEXT: str b2, [x9]
+; CHECK-NEXT: fcvtzs s4, s4
; CHECK-NEXT: fcsel s6, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
; CHECK-NEXT: fcsel s3, s0, s6, mi
; CHECK-NEXT: fcmp s5, s1
-; CHECK-NEXT: strb w12, [x9, #1]
+; CHECK-NEXT: stur b4, [x9, #1]
; CHECK-NEXT: fcsel s6, s1, s5, gt
; CHECK-NEXT: fcmp s5, #0.0
-; CHECK-NEXT: fcvtzs w13, s3
-; CHECK-NEXT: fcsel s2, s0, s6, mi
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: fcsel s5, s0, s6, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w13, [x9, #2]
-; CHECK-NEXT: fcvtzs w14, s2
-; CHECK-NEXT: strb w14, [x9, #3]
+; CHECK-NEXT: stur b3, [x9, #2]
+; CHECK-NEXT: fcvtzs s5, s5
+; CHECK-NEXT: stur b5, [x9, #3]
; CHECK-NEXT: add x9, x9, #4
; CHECK-NEXT: b.ne .LBB3_6
; CHECK-NEXT: .LBB3_7: // %for.cond.cleanup
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 290a473..74d1165 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1907,11 +1907,8 @@ define i32 @test_udot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -1952,17 +1949,13 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: umull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v16i8:
@@ -2018,36 +2011,21 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: umull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: uaddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
@@ -2118,61 +2096,33 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: ushll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: ushll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: umull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: umull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: umull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: umull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: umlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: umlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: umlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: umlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: umull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: umull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: umull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: uaddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: uaddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: uaddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
@@ -2225,11 +2175,8 @@ define i32 @test_sdot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: smull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -2270,17 +2217,13 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: smull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: saddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v16i8:
@@ -2336,36 +2279,21 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: smull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: saddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
@@ -2436,61 +2364,33 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: sshll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: smull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: smull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: smull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: smull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: smlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: smlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: smlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: smlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: smull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: smull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: smull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: saddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: saddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: saddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
@@ -2549,18 +2449,27 @@ define i32 @test_udot_v8i8_multi_use(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-DOT-NEXT: add w0, w8, w9
; CHECK-SD-DOT-NEXT: ret
;
-; CHECK-GI-LABEL: test_udot_v8i8_multi_use:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-NEXT: mov v3.16b, v2.16b
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: umlal2 v3.4s, v1.8h, v0.8h
-; CHECK-GI-NEXT: addv s0, v3.4s
-; CHECK-GI-NEXT: fmov w9, s0
-; CHECK-GI-NEXT: add w0, w9, w8
-; CHECK-GI-NEXT: ret
+; CHECK-GI-BASE-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v0.8h
+; CHECK-GI-BASE-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: umull v3.8h, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: udot v2.2s, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: ushll v0.4s, v3.4h, #0
+; CHECK-GI-DOT-NEXT: fmov w9, s0
+; CHECK-GI-DOT-NEXT: addp v1.2s, v2.2s, v2.2s
+; CHECK-GI-DOT-NEXT: fmov w8, s1
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
+; CHECK-GI-DOT-NEXT: ret
entry:
%0 = zext <8 x i8> %a to <8 x i32>
%1 = zext <8 x i8> %b to <8 x i32>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
index eafad58..2226fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
@@ -157,7 +157,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
- %r2 = frem half %r0, %r1
+ %r2 = frem afn half %r0, %r1
store half %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -311,7 +311,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
- %r2 = frem float %r0, %r1
+ %r2 = frem afn float %r0, %r1
store float %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -489,7 +489,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
ptr addrspace(1) %in2) #1 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
- %r2 = frem double %r0, %r1
+ %r2 = frem afn double %r0, %r1
store double %r2, ptr addrspace(1) %out, align 8
ret void
}
@@ -1140,5 +1140,5 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
index 1f9c059..3fa73c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
@@ -2,9 +2,8 @@
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -enable-unsafe-fp-math -o - %s | FileCheck -check-prefix=GFX9-UNSAFE %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
---
name: test_fdiv_s16
@@ -99,17 +98,56 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX10-LABEL: name: test_fdiv_s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -120,6 +158,90 @@ body: |
...
---
+name: test_fdiv_s16_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s16_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[FPEXT]], [[INT]]
+ ; SI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+ ; SI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s16_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s16_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s16_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = afn G_FDIV %2, %3
+ %5:_(s32) = G_ANYEXT %4
+ $vgpr0 = COPY %5
+...
+
+---
name: test_fdiv_s32_denorms_on
machineFunctionInfo:
mode:
@@ -192,15 +314,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_on
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_on
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -220,6 +333,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -227,6 +360,70 @@ body: |
...
---
+name: test_fdiv_s32_denorms_on_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+
+---
name: test_fdiv_s32_denorms_off
machineFunctionInfo:
mode:
@@ -305,15 +502,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -335,6 +523,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -342,6 +552,69 @@ body: |
...
---
+name: test_fdiv_s32_denorms_off_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: false
+ fp32-output-denormals: false
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+---
name: test_fdiv_s32_denorms_off_arcp
machineFunctionInfo:
mode:
@@ -420,15 +693,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off_arcp
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off_arcp
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -450,6 +714,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_arcp
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = arcp G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = arcp G_FDIV %0, %1
@@ -536,23 +822,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -572,6 +841,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_FDIV %0, %1
@@ -708,20 +997,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -760,6 +1035,45 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_FDIV %0, %1
@@ -877,20 +1191,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32_flags
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32_flags
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -925,6 +1225,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32_flags
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = nnan G_FDIV %0, %1
@@ -1078,22 +1413,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV4]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV5]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[FMUL2]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v3s32
; GFX10: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
; GFX10-NEXT: {{ $}}
@@ -1140,6 +1459,53 @@ body: |
; GFX10-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s32
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; GFX11-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV3]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV4]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[INT14:%[0-9]+]]:_(s32), [[INT15:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 0
+ ; GFX11-NEXT: [[INT16:%[0-9]+]]:_(s32), [[INT17:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 1
+ ; GFX11-NEXT: [[INT18:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT14]](s32)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[INT14]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[INT18]], [[C]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FMA10]], [[INT18]], [[INT18]]
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT16]], [[FMA11]]
+ ; GFX11-NEXT: [[FMA12:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL2]], [[INT16]]
+ ; GFX11-NEXT: [[FMA13:%[0-9]+]]:_(s32) = G_FMA [[FMA12]], [[FMA11]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA14:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA13]], [[INT16]]
+ ; GFX11-NEXT: [[INT19:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA14]](s32), [[FMA11]](s32), [[FMA13]](s32), [[INT17]](s1)
+ ; GFX11-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
%2:_(<3 x s32>) = G_FDIV %0, %1
@@ -1271,35 +1637,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[UV2]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[UV]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[UV3]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[INT1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA7]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FMA8]], [[FMA7]], [[FMA7]]
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[FMA9]]
- ; GFX9-UNSAFE-NEXT: [[FMA10:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[UV1]]
- ; GFX9-UNSAFE-NEXT: [[FMA11:%[0-9]+]]:_(s64) = G_FMA [[FMA10]], [[FMA9]], [[FMUL1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FMA5]](s64), [[FMA11]](s64)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s64
; GFX10: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
; GFX10-NEXT: {{ $}}
@@ -1334,6 +1671,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s64
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT9]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA6]], [[C]]
+ ; GFX11-NEXT: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[INT10]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL1]](s64), [[INT11]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_FDIV %0, %1
@@ -1502,26 +1874,92 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC2]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v2s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC2]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC3]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV2]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV3]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_FDIV %0, %1
@@ -1756,37 +2194,133 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
; GFX9-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL2]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
- ; GFX9-UNSAFE-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ; GFX10-LABEL: name: test_fdiv_v3s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC3]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC4]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC5]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX10-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX10-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX11-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX11-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s16>) = G_IMPLICIT_DEF
%1:_(<3 x s16>) = G_IMPLICIT_DEF
%2:_(<3 x s16>) = G_FDIV %0, %1
@@ -2094,42 +2628,164 @@ body: |
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v4s16
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC6]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC7]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[INT3]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL2]](s16), [[FMUL3]](s16)
- ; GFX9-UNSAFE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v4s16
+ ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC4]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC5]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC6]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; GFX10-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX10-NEXT: [[FMUL15:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX10-NEXT: [[FMUL16:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FMUL15]]
+ ; GFX10-NEXT: [[FADD12:%[0-9]+]]:_(s32) = G_FADD [[FMUL16]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL17:%[0-9]+]]:_(s32) = G_FMUL [[FADD12]], [[INT6]]
+ ; GFX10-NEXT: [[FADD13:%[0-9]+]]:_(s32) = G_FADD [[FMUL17]], [[FMUL15]]
+ ; GFX10-NEXT: [[FMUL18:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FADD13]]
+ ; GFX10-NEXT: [[FADD14:%[0-9]+]]:_(s32) = G_FADD [[FMUL18]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL19:%[0-9]+]]:_(s32) = G_FMUL [[FADD14]], [[INT6]]
+ ; GFX10-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL19]], [[C1]]
+ ; GFX10-NEXT: [[FADD15:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FADD13]]
+ ; GFX10-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD15]](s32)
+ ; GFX10-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[TRUNC7]](s16), [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v4s16
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+ ; GFX11-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX11-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMUL6]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FMA9]], [[INT6]], [[FMUL6]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMA10]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FMA11]], [[INT6]]
+ ; GFX11-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL7]], [[C]]
+ ; GFX11-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FMA10]]
+ ; GFX11-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[UV7]](s16), [[UV3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_FDIV %0, %1
@@ -2185,15 +2841,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2202,6 +2849,15 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2261,16 +2917,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2280,6 +2926,16 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2351,13 +3007,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2376,6 +3025,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2448,14 +3116,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2475,6 +3135,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2558,22 +3238,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2592,6 +3256,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double 1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
@@ -2678,23 +3361,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2714,6 +3380,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double -1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
index b992506..00c6656 100644
--- a/llvm/test/CodeGen/AMDGPU/add-max.ll
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -5,9 +5,7 @@
define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -18,9 +16,7 @@ define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_svv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v1
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, v1
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -29,12 +25,17 @@ define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
-; GCN-LABEL: add_max_u32_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_add_co_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_ssv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, s0, s1, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_ssv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
%ret = bitcast i32 %max to float
@@ -58,9 +59,7 @@ define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c
define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
; GCN-LABEL: add_max_u32_vsi:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 4, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, s0, 4
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 4)
@@ -71,9 +70,7 @@ define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
; GCN-LABEL: add_max_u32_svl:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 0x64, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, 0x64
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 100)
@@ -82,12 +79,17 @@ define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
}
define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
-; GCN-LABEL: add_max_u32_slv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_addk_co_i32 s0, 0x64
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_slv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, 0x64, s0, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_slv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_addk_co_i32 s0, 0x64
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, 100
%max = call i32 @llvm.umax.i32(i32 %add, i32 %b)
%ret = bitcast i32 %max to float
@@ -97,9 +99,7 @@ define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
@@ -110,9 +110,7 @@ define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
@@ -123,9 +121,7 @@ define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
diff --git a/llvm/test/CodeGen/AMDGPU/bug-undef-spilled-agpr.mir b/llvm/test/CodeGen/AMDGPU/bug-undef-spilled-agpr.mir
new file mode 100644
index 0000000..7336a54
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/bug-undef-spilled-agpr.mir
@@ -0,0 +1,97 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -run-pass=si-lower-sgpr-spills,greedy,si-lower-wwm-copies,virtregrewriter,prologepilog -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
+
+---
+name: widget
+tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
+stack:
+ - { id: 0, type: spill-slot, size: 4, alignment: 4, stack-id: sgpr-spill }
+ - { id: 1, type: spill-slot, size: 4, alignment: 4, stack-id: sgpr-spill }
+machineFunctionInfo:
+ hasSpilledSGPRs: true
+ scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3'
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ ; GCN-LABEL: name: widget
+ ; GCN: bb.0:
+ ; GCN-NEXT: successors: %bb.1(0x80000000)
+ ; GCN-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15, $agpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $sgpr4_sgpr5 = S_XOR_SAVEEXEC_B64 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: $vgpr63 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr63, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, 0, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; GCN-NEXT: $exec = S_MOV_B64 -1
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr62, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 4, 0, 0, implicit $exec :: (store (s32) into %stack.3, addrspace 5)
+ ; GCN-NEXT: $exec = S_MOV_B64 killed $sgpr4_sgpr5
+ ; GCN-NEXT: renamable $vgpr62 = IMPLICIT_DEF
+ ; GCN-NEXT: $vgpr62 = SI_SPILL_S32_TO_VGPR $sgpr15, 0, killed $vgpr62
+ ; GCN-NEXT: $noreg = S_OR_SAVEEXEC_B64 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: renamable $agpr0 = COPY killed renamable $vgpr62
+ ; GCN-NEXT: $exec = S_MOV_B64 killed $noreg
+ ; GCN-NEXT: renamable $vgpr62 = IMPLICIT_DEF
+ ; GCN-NEXT: dead renamable $vgpr62 = V_AND_B32_e32 1, killed $vgpr62, implicit $exec
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.1:
+ ; GCN-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; GCN-NEXT: liveins: $agpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; GCN-NEXT: S_BRANCH %bb.3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.2:
+ ; GCN-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000)
+ ; GCN-NEXT: liveins: $agpr0, $sgpr86, $sgpr87, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr80_sgpr81, $sgpr82_sgpr83, $sgpr84_sgpr85, $sgpr96_sgpr97, $sgpr98_sgpr99
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: S_CBRANCH_EXECNZ %bb.1, implicit $exec
+ ; GCN-NEXT: S_BRANCH %bb.4
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.3:
+ ; GCN-NEXT: successors: %bb.2(0x80000000)
+ ; GCN-NEXT: liveins: $agpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $noreg = S_OR_SAVEEXEC_B64 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: renamable $vgpr62 = COPY renamable $agpr0
+ ; GCN-NEXT: $exec = S_MOV_B64 killed $noreg
+ ; GCN-NEXT: $sgpr14 = SI_RESTORE_S32_FROM_VGPR killed $vgpr62, 1
+ ; GCN-NEXT: S_BRANCH %bb.2
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.4:
+ ; GCN-NEXT: $sgpr4_sgpr5 = S_XOR_SAVEEXEC_B64 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, 0, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; GCN-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr63, implicit $exec
+ ; GCN-NEXT: $exec = S_MOV_B64 -1
+ ; GCN-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 4, 0, 0, implicit $exec :: (load (s32) from %stack.3, addrspace 5)
+ ; GCN-NEXT: $exec = S_MOV_B64 killed $sgpr4_sgpr5
+ ; GCN-NEXT: SI_RETURN
+ bb.0:
+ liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr15
+
+ %45:vgpr_32 = IMPLICIT_DEF
+ SI_SPILL_S32_SAVE $sgpr15, %stack.0, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.0, addrspace 5)
+ %16:vgpr_32 = V_AND_B32_e32 1, %45, implicit $exec
+
+ bb.1:
+ successors: %bb.3, %bb.2
+
+ S_CBRANCH_EXECZ %bb.2, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.4(0x04000000), %bb.1(0x7c000000)
+ liveins: $sgpr86, $sgpr87, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr80_sgpr81, $sgpr82_sgpr83, $sgpr84_sgpr85, $sgpr96_sgpr97, $sgpr98_sgpr99
+
+ S_CBRANCH_EXECNZ %bb.1, implicit $exec
+ S_BRANCH %bb.4
+
+ bb.3:
+ ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ $sgpr14 = SI_SPILL_S32_RESTORE %stack.1, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.1, addrspace 5)
+ ADJCALLSTACKDOWN 0, 28, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ S_BRANCH %bb.2
+
+ bb.4:
+ SI_RETURN
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
new file mode 100644
index 0000000..fcbf7ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -show-mc-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+define i16 @cvt_pk_bf8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_bf8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_bf8_f16 v0, 0x38003800 ; encoding: [0x00,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x38]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0xH3800, half 0xH3800>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
+
+define i16 @cvt_pk_fp8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_fp8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_fp8_f16 v0, 0x3800 ; encoding: [0x00,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0xH3800, half 0xH0>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 9ae9d19..210e09f 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1702,7 +1702,7 @@ entry:
%gep.r = getelementptr inbounds half, ptr addrspace(1) %r, i64 %tid.ext
%a.val = load volatile half, ptr addrspace(1) %gep.a
%b.val = load volatile half, ptr addrspace(1) %gep.b
- %r.val = fdiv half %a.val, %b.val
+ %r.val = fdiv afn half %a.val, %b.val
store half %r.val, ptr addrspace(1) %gep.r
ret void
}
@@ -2475,4 +2475,4 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half>) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 57b4857..c52fb61 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -11,6 +11,10 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SDAG-FAKE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-TRUE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-FAKE16 %s
define amdgpu_kernel void @fptrunc_f32_to_f16(
; SI-SDAG-LABEL: fptrunc_f32_to_f16:
@@ -192,6 +196,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -381,6 +418,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_afn(ptr addrspace(1) %r,
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %a) {
entry:
%a.val = load float, ptr addrspace(1) %a
@@ -1089,6 +1159,130 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s7, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s9, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s2, s7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s2, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s2, s2, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s7, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s2, s2, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 0x7c00, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1101,62 +1295,21 @@ entry:
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; SI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v1
-; SI-SDAG-NEXT: s_and_b32 s6, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s7, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s8, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; SI-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s6, s6, s7
-; SI-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; SI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; SI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s6, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; SI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s7
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s6, s1, s0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1174,62 +1327,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v1
-; VI-SDAG-NEXT: s_and_b32 s5, s4, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s5, v0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s8, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_and_b32 s5, s7, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s5, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; VI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; VI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; VI-SDAG-NEXT: s_or_b32 s9, s5, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; VI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s5, 0
-; VI-SDAG-NEXT: s_cselect_b32 s5, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s7
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s5
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1247,62 +1359,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX9-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX9-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX9-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1320,62 +1391,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX950-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX950-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX950-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX950-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1401,60 +1431,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -1468,60 +1451,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -1552,6 +1488,40 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1769,6 +1739,38 @@ define amdgpu_kernel void @fptrunc_v2f32_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3014,6 +3016,225 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s9, 0x3f1, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s5, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s5, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s2, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s8, 1, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s8, s8, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s9, s2, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s8, s8, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s4, s4, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s11, s10, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s4, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s8, s10
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s2, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s2, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s2, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s10, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s6, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s4, s7, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3026,106 +3247,25 @@ entry:
define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; SI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v3
-; SI-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; SI-SDAG-NEXT: s_and_b32 s7, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s8, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s9, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; SI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; SI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; SI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; SI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; SI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; SI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; SI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s7, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; SI-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; SI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; SI-SDAG-NEXT: s_or_b32 s1, s1, s7
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; SI-SDAG-NEXT: s_lshl_b32 s1, s1, 16
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s8
-; SI-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; SI-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s0, s6, s0
-; SI-SDAG-NEXT: s_and_b32 s0, s0, 0xffff
-; SI-SDAG-NEXT: s_or_b32 s6, s0, s1
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; SI-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3147,106 +3287,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; VI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v3
-; VI-SDAG-NEXT: s_and_b32 s7, s4, 0x1ff
-; VI-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; VI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; VI-SDAG-NEXT: s_lshr_b32 s8, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s9, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; VI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; VI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; VI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s7, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; VI-SDAG-NEXT: s_and_b32 s8, s5, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_lshr_b32 s9, s5, 8
-; VI-SDAG-NEXT: s_bfe_u32 s10, s5, 0xb0014
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; VI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; VI-SDAG-NEXT: s_lshl_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, s8
-; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s6
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3267,104 +3325,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX9-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX9-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX9-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX9-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX9-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX9-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX9-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3385,104 +3363,22 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX950-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX950-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX950-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX950-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX950-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX950-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX950-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX950-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3511,109 +3407,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v1, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_pack_b32_f16 v0, v0.h, v0.l
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -3627,109 +3431,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -3768,6 +3480,46 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v1, s[6:7]
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3957,6 +3709,42 @@ define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4147,6 +3935,42 @@ define amdgpu_kernel void @fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4337,6 +4161,42 @@ define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset1_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4536,6 +4396,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4735,6 +4631,45 @@ define amdgpu_kernel void @fptrunc_fabs_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4943,6 +4878,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_sext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 4f8eab1..5d31177 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -226,59 +226,59 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: s_mov_b32 s0, s4
+; VI-SDAG-NEXT: s_lshr_b32 s4, s7, 8
+; VI-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
+; VI-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
+; VI-SDAG-NEXT: s_or_b32 s4, s4, s6
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_mov_b32 s1, s5
+; VI-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v0
+; VI-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
+; VI-SDAG-NEXT: s_or_b32 s4, s8, s4
+; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
+; VI-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
+; VI-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v0
+; VI-SDAG-NEXT: s_lshr_b32 s9, s5, s8
+; VI-SDAG-NEXT: s_lshl_b32 s8, s9, s8
+; VI-SDAG-NEXT: s_cmp_lg_u32 s8, s5
+; VI-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; VI-SDAG-NEXT: s_addk_i32 s6, 0xfc10
+; VI-SDAG-NEXT: s_lshl_b32 s8, s6, 12
+; VI-SDAG-NEXT: s_or_b32 s5, s9, s5
+; VI-SDAG-NEXT: s_or_b32 s8, s4, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 1
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s8
+; VI-SDAG-NEXT: s_and_b32 s8, s5, 7
+; VI-SDAG-NEXT: s_cmp_gt_i32 s8, 5
+; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
+; VI-SDAG-NEXT: s_cmp_eq_u32 s8, 3
+; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
+; VI-SDAG-NEXT: s_or_b32 s8, s8, s9
+; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; VI-SDAG-NEXT: s_add_i32 s5, s5, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 31
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
+; VI-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, s5
+; VI-SDAG-NEXT: s_lshr_b32 s5, s7, 16
+; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
+; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
+; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16:
; VI-GISEL: ; %bb.0:
@@ -331,68 +331,57 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX10-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX10-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX10-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX10-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX10-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX10-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX10-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX10-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX10-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX10-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX10-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX10-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX10-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX10-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX10-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX10-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX10-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX10-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX10-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX10-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX10-GISEL: ; %bb.0:
@@ -445,76 +434,65 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX11-SAFE-SDAG: ; %bb.0:
-; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-SAFE-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX11-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX11-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX11-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX11-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX11-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX11-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX11-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX11-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX11-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX11-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX11-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX11-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX11-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX11-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX11-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX11-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
+; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX11-GISEL: ; %bb.0:
@@ -570,30 +548,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-GISEL-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-TRUE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-TRUE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-FAKE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-FAKE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_endpgm
%result = fptrunc double %in to half
%result_i16 = bitcast half %result to i16
store i16 %result_i16, ptr addrspace(1) %out
@@ -603,111 +557,27 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double %in) {
; SI-LABEL: fptrunc_f64_to_f16_afn:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_movk_i32 s2, 0x7e00
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s0, s7, 8
-; SI-NEXT: s_and_b32 s1, s7, 0x1ff
-; SI-NEXT: s_and_b32 s8, s0, 0xffe
-; SI-NEXT: s_or_b32 s0, s1, s6
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: s_bfe_u32 s0, s7, 0xb0014
-; SI-NEXT: v_readfirstlane_b32 s1, v0
-; SI-NEXT: s_sub_i32 s6, 0x3f1, s0
-; SI-NEXT: s_or_b32 s1, s8, s1
-; SI-NEXT: v_med3_i32 v0, s6, 0, 13
-; SI-NEXT: s_or_b32 s6, s1, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_lshr_b32 s9, s6, s8
-; SI-NEXT: s_lshl_b32 s8, s9, s8
-; SI-NEXT: s_cmp_lg_u32 s8, s6
-; SI-NEXT: s_cselect_b32 s6, 1, 0
-; SI-NEXT: s_addk_i32 s0, 0xfc10
-; SI-NEXT: s_or_b32 s6, s9, s6
-; SI-NEXT: s_lshl_b32 s8, s0, 12
-; SI-NEXT: s_or_b32 s8, s1, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 1
-; SI-NEXT: s_cselect_b32 s6, s6, s8
-; SI-NEXT: s_and_b32 s8, s6, 7
-; SI-NEXT: s_cmp_gt_i32 s8, 5
-; SI-NEXT: s_cselect_b32 s9, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s8, 3
-; SI-NEXT: s_cselect_b32 s8, 1, 0
-; SI-NEXT: s_lshr_b32 s6, s6, 2
-; SI-NEXT: s_or_b32 s8, s8, s9
-; SI-NEXT: s_add_i32 s6, s6, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 31
-; SI-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s1, 0
-; SI-NEXT: s_cselect_b32 s1, s2, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s0, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s1, s6
-; SI-NEXT: s_lshr_b32 s1, s7, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s6, s1, s0
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; VI-GISEL: ; %bb.0:
@@ -720,68 +590,16 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; GFX10-GISEL: ; %bb.0:
@@ -794,74 +612,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX11-SAFE-SDAG: ; %bb.0:
; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-SAFE-SDAG-NEXT: s_endpgm
;
@@ -1833,4 +1592,8 @@ define amdgpu_kernel void @fptrunc_v8f64_to_v8f32_afn(ptr addrspace(1) %out, <8
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10-SAFE-GISEL: {{.*}}
+; GFX10-SAFE-SDAG: {{.*}}
+; GFX10-UNSAFE-SDAG: {{.*}}
; VI-SAFE-GISEL: {{.*}}
+; VI-SAFE-SDAG: {{.*}}
+; VI-UNSAFE-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 87c7cce..f81950b 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -1294,13 +1294,13 @@ define float @v_sqrt_f32__enough_unsafe_attrs(float %x) #3 {
ret float %result
}
-define float @v_sqrt_f32__unsafe_attr(float %x) #4 {
+define float @v_sqrt_f32__unsafe_attr(float %x) {
; GCN-LABEL: v_sqrt_f32__unsafe_attr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sqrt_f32_e32 v0, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = call nsz float @llvm.sqrt.f32(float %x)
+ %result = call afn nsz float @llvm.sqrt.f32(float %x)
ret float %result
}
@@ -4763,7 +4763,6 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #2 = { "approx-func-fp-math"="true" }
attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
attributes #5 = { "no-infs-fp-math"="true" }
!0 = !{float 0.5}
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
new file mode 100644
index 0000000..892955c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
@@ -0,0 +1,98 @@
+; RUN: not llc -mtriple=amdgcn-amd-amdhsa -mcpu=bonaire -filetype=null %s 2>&1 | FileCheck -implicit-check-not=error %s
+
+; CHECK: error: couldn't allocate output register for constraint '{v256}'
+define void @out_of_bounds_vgpr32_def() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v256}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_def_high_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[255:256]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_def_low_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[256:257]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v256}'
+define void @out_of_bounds_vgpr32_use() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 %0, %1", "=v,{v256}"(i32 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_high_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[255:256]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_low_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[256:257]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[1:0]}'
+define void @vgpr_tuple_swapped() {
+ tail call void asm sideeffect "; use %0", "{v[1:0]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967295}'
+define void @vgpr_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v4294967295}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967296}'
+define void @vgpr_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v4294967296}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967295:4294967296]}'
+define void @vgpr_tuple_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967295:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967295]}'
+define void @vgpr_tuple_0_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967296]}'
+define void @vgpr_tuple_0_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967264:4294967295]}'
+define void @vgpr32_last_is_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967264:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967265:4294967296]}'
+define void @vgpr32_last_is_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[4294967265:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2:2147483651]}'
+define void @overflow_bitwidth_0() {
+ tail call void asm sideeffect "; use %0", "{v[2:2147483651]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2147483635:2147483651]}'
+define void @overflow_bitwidth_1() {
+ tail call void asm sideeffect "; use %0", "{v[2147483635:2147483651]}"(i64 123)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
index 43c8d83..fd51759 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
@@ -1,10 +1,188 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+declare i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float, float, i32, i1)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float, i32, i32, i32)
declare float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32, i32)
+define i32 @test_cvt_pk_fp8_f32_word0(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.l, v0, v1 clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 false)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_pk_fp8_f32_word1(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 true)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_pk_fp8_f32_word1_dpp(i32 %a, float %y, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %tmp1, float %y, i32 %old, i1 true)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte0(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 0)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte1(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 1)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte2(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:2 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 2)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte3(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:3 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 3)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_sr_fp8_f32_byte1_dpp(i32 %a, i32 %r, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %tmp1, i32 %r, i32 %old, i32 1)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
define float @test_cvt_f32_fp8_e5m3_byte0(i32 %a) {
; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte0:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
new file mode 100644
index 0000000..6ccfad7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
@@ -0,0 +1,539 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-REAL16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-REAL16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-FAKE16 %s
+
+declare i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half>)
+declare i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half>)
+declare i32 @llvm.amdgcn.cvt.sr.bf8.f16(half, i32, i32, i32)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f16(half, i32, i32, i32)
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
new file mode 100644
index 0000000..2179800
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+declare <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
new file mode 100644
index 0000000..4309cfbe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_bf8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_bf8 v[4:7], v[0:1], v2 scale_sel:2
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 2)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_f16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_f16_fp4 v[4:7], v0, v1 scale_sel:3
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 3)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp4 v[4:7], v0, v1 scale_sel:4
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 4)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 7)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 1)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
index 291a4e2..217c306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
@@ -168,7 +168,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -179,7 +179,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -213,7 +213,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -225,7 +225,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -259,7 +259,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -269,7 +269,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -300,7 +300,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -311,7 +311,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -345,7 +345,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -357,7 +357,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -391,7 +391,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -401,7 +401,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -773,7 +773,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -783,7 +783,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -895,7 +895,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -905,7 +905,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -935,7 +935,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -945,7 +945,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
@@ -1602,7 +1602,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1613,7 +1613,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1647,7 +1647,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1659,7 +1659,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1693,7 +1693,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1703,7 +1703,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -1734,7 +1734,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1745,7 +1745,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1779,7 +1779,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1791,7 +1791,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1825,7 +1825,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1835,7 +1835,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -2032,7 +2032,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -2042,7 +2042,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -2112,7 +2112,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -2122,7 +2122,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -2152,7 +2152,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -2162,7 +2162,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
new file mode 100644
index 0000000..82991ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; xUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: GlobalISel does not work with bf16
+
+declare <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 425a853..477f0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -51,7 +51,7 @@ define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(ptr addrspace(1) %out,
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(ptr addrspace(1) %out, float %src) #4 {
- %rcp = fdiv float 1.0, %src, !fpmath !0
+ %rcp = fdiv afn float 1.0, %src, !fpmath !0
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -105,8 +105,8 @@ define amdgpu_kernel void @safe_rsq_rcp_pat_amdgcn_sqrt_f32_nocontract(ptr addrs
; SI: v_sqrt_f32_e32
; SI: v_rcp_f32_e32
define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(ptr addrspace(1) %out, float %src) #2 {
- %sqrt = call float @llvm.sqrt.f32(float %src)
- %rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
+ %sqrt = call afn float @llvm.sqrt.f32(float %src)
+ %rcp = call afn float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -148,7 +148,7 @@ define amdgpu_kernel void @rcp_pat_f64(ptr addrspace(1) %out, double %src) #1 {
; SI: v_fma_f64
; SI: v_fma_f64
define amdgpu_kernel void @unsafe_rcp_pat_f64(ptr addrspace(1) %out, double %src) #2 {
- %rcp = fdiv double 1.0, %src
+ %rcp = fdiv afn double 1.0, %src
store double %rcp, ptr addrspace(1) %out, align 8
ret void
}
@@ -214,9 +214,9 @@ define amdgpu_kernel void @unsafe_amdgcn_sqrt_rsq_rcp_pat_f64(ptr addrspace(1) %
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #3 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 8c1e166..7151fee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -3227,72 +3227,6 @@ define float @v_exp_f32_fast(float %in) {
ret float %result
}
-define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp.f32(float %in)
- ret float %result
-}
-
define float @v_exp_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index edc505b..918b1b2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -3235,78 +3235,6 @@ define float @v_exp10_f32_fast(float %in) {
ret float %result
}
-define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp10.f32(float %in)
- ret float %result
-}
-
define float @v_exp10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp10_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index 38d1b47..307fa89 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -3076,121 +3076,6 @@ define float @v_log_f32_fast(float %in) {
ret float %result
}
-define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3f317218, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc1b17218, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3f317218, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc1b17218, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3f317218, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log.f32(float %in)
- ret float %result
-}
-
define float @v_log_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 058933f..5278589 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -3076,121 +3076,6 @@ define float @v_log10_f32_fast(float %in) {
ret float %result
}
-define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc11a209b, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3e9a209b, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc11a209b, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3e9a209b, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log10.f32(float %in)
- ret float %result
-}
-
define float @v_log10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log10_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index 1e6b77e..702a69f 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -77,7 +77,7 @@ define amdgpu_kernel void @copy_flat(ptr nocapture %d, ptr nocapture readonly %s
; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 16
; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1]
+; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
; GFX1250-NEXT: s_cbranch_scc1 .LBB0_2
@@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
@@ -490,7 +490,7 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7]
+; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 16, v[0:1]
; GFX1250-NEXT: s_cbranch_scc1 .LBB4_2
@@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
diff --git a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
index 05a0b1a..836e88c 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
@@ -35,13 +35,6 @@ define amdgpu_ps float @mad_i32_vvv(i32 %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @mad_i32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
-; GCN-LABEL: mad_i32_sss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_mul_i32 s0, s0, s1
-; GCN-NEXT: s_add_i32 s0, s0, s2
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
-;
; GFX9-LABEL: mad_i32_sss:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mul_i32 s0, s0, s1
diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
index be02045..4c0ab91 100644
--- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
+++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
@@ -6982,7 +6982,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0x800
; CHECK-NEXT: s_cbranch_scc1 .LBB6_2
-; CHECK-NEXT: .LBB6_3: ; %Flow9
+; CHECK-NEXT: .LBB6_3: ; %Flow7
; CHECK-NEXT: s_andn2_saveexec_b32 s8, s6
; CHECK-NEXT: s_cbranch_execz .LBB6_6
; CHECK-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -7048,7 +7048,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; CHECK-NEXT: s_cbranch_scc0 .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow10
+; CHECK-NEXT: .LBB6_6: ; %Flow8
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
@@ -7689,7 +7689,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc1 .LBB6_2
-; ALIGNED-NEXT: .LBB6_3: ; %Flow9
+; ALIGNED-NEXT: .LBB6_3: ; %Flow7
; ALIGNED-NEXT: s_andn2_saveexec_b32 s8, s6
; ALIGNED-NEXT: s_cbranch_execz .LBB6_6
; ALIGNED-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -8316,7 +8316,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc0 .LBB6_5
-; ALIGNED-NEXT: .LBB6_6: ; %Flow10
+; ALIGNED-NEXT: .LBB6_6: ; %Flow8
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
; ALIGNED-NEXT: s_clause 0x7
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
@@ -8369,7 +8369,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:2032
; UNROLL3-NEXT: ; implicit-def: $vgpr2_vgpr3
; UNROLL3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; UNROLL3-NEXT: .LBB6_4: ; %Flow7
+; UNROLL3-NEXT: .LBB6_4: ; %Flow5
; UNROLL3-NEXT: s_andn2_saveexec_b32 s8, s6
; UNROLL3-NEXT: s_cbranch_execz .LBB6_7
; UNROLL3-NEXT: ; %bb.5: ; %memmove_bwd_residual
@@ -8403,7 +8403,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[16:17], v[12:15], off offset:32
; UNROLL3-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; UNROLL3-NEXT: s_cbranch_scc0 .LBB6_6
-; UNROLL3-NEXT: .LBB6_7: ; %Flow8
+; UNROLL3-NEXT: .LBB6_7: ; %Flow6
; UNROLL3-NEXT: s_or_b32 exec_lo, exec_lo, s8
; UNROLL3-NEXT: s_setpc_b64 s[30:31]
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index 272daa9..dd5c247 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -460,10 +460,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB3_3
-; CHECK-NEXT: ; %bb.1: ; %Flow34
+; CHECK-NEXT: ; %bb.1: ; %Flow36
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_10
-; CHECK-NEXT: .LBB3_2: ; %Flow35
+; CHECK-NEXT: .LBB3_2: ; %Flow37
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -494,7 +494,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_5
-; CHECK-NEXT: .LBB3_6: ; %Flow29
+; CHECK-NEXT: .LBB3_6: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB3_9
@@ -520,7 +520,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_8
-; CHECK-NEXT: .LBB3_9: ; %Flow27
+; CHECK-NEXT: .LBB3_9: ; %Flow29
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
@@ -556,7 +556,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB3_12
-; CHECK-NEXT: .LBB3_13: ; %Flow33
+; CHECK-NEXT: .LBB3_13: ; %Flow35
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB3_16
@@ -584,7 +584,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_15
-; CHECK-NEXT: .LBB3_16: ; %Flow31
+; CHECK-NEXT: .LBB3_16: ; %Flow33
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
@@ -907,10 +907,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB6_3
-; CHECK-NEXT: ; %bb.1: ; %Flow41
+; CHECK-NEXT: ; %bb.1: ; %Flow39
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_10
-; CHECK-NEXT: .LBB6_2: ; %Flow42
+; CHECK-NEXT: .LBB6_2: ; %Flow40
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB6_3: ; %memmove_copy_forward
@@ -940,7 +940,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow36
+; CHECK-NEXT: .LBB6_6: ; %Flow34
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB6_9
@@ -966,11 +966,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_8
-; CHECK-NEXT: .LBB6_9: ; %Flow34
+; CHECK-NEXT: .LBB6_9: ; %Flow32
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1002,15 +1002,15 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB6_12
-; CHECK-NEXT: .LBB6_13: ; %Flow40
+; CHECK-NEXT: .LBB6_13: ; %Flow38
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB6_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop
@@ -1030,7 +1030,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_15
-; CHECK-NEXT: .LBB6_16: ; %Flow38
+; CHECK-NEXT: .LBB6_16: ; %Flow36
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -1181,8 +1181,8 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB8_9: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1219,10 +1219,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB8_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop
diff --git a/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
new file mode 100644
index 0000000..c4a48a46
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
@@ -0,0 +1,34 @@
+; REQUIRES: asserts
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -debug-only=gcn-subtarget < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s 2>&1 | FileCheck -check-prefixes=WARNING %s
+
+; CHECK: Post-MI-sched direction (postra-sched-topdown): topdown
+define float @postra-sched-topdown(float %input) nounwind #0 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bottomup): bottomup
+define float @postra-sched-bottomup(float %input) nounwind #1 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bidirectional): bidirectional
+define float @postra-sched-bidirectional(float %input) nounwind #2 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-warning): topdown
+; WARNING: invalid value for postRA direction attribute
+define float @postra-sched-warning(float %input) nounwind #3 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+attributes #0 = {"amdgpu-post-ra-direction"="topdown"}
+attributes #1 = {"amdgpu-post-ra-direction"="bottomup"}
+attributes #2 = {"amdgpu-post-ra-direction"="bidirectional"}
+attributes #3 = {"amdgpu-post-ra-direction"="warning"}
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420e..9f0ffbc 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -56,7 +56,7 @@ define float @v_rcp_f32_ieee_unsafe(float %x) #4 {
; R600: ; %bb.0:
; R600-NEXT: CF_END
; R600-NEXT: PAD
- %rcp = fdiv float 1.0, %x
+ %rcp = fdiv afn float 1.0, %x
ret float %rcp
}
@@ -1411,10 +1411,10 @@ define amdgpu_kernel void @s_div_arcp_neg_k_x_pat_f32_daz(ptr addrspace(1) %out)
declare float @llvm.fabs.f32(float) #1
declare float @llvm.sqrt.f32(float) #1
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
index f7e0388..f967e95 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -65,8 +65,8 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) noalias %out, ptr addrspace(
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -103,8 +103,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(ptr addrspace(1) noalias %out, float %va
; GCN-UNSAFE-NEXT: s_mov_b32 s2, -1
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-UNSAFE-NEXT: s_endpgm
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -196,7 +196,7 @@ define amdgpu_kernel void @rsqrt_fmul(ptr addrspace(1) %out, ptr addrspace(1) %i
%x = call contract float @llvm.sqrt.f32(float %a)
%y = fmul contract float %x, %b
- %z = fdiv arcp contract float %c, %y
+ %z = fdiv arcp afn contract float %c, %y
store float %z, ptr addrspace(1) %out.gep
ret void
}
@@ -258,8 +258,8 @@ define amdgpu_kernel void @neg_rsq_f32(ptr addrspace(1) noalias %out, ptr addrsp
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -322,8 +322,8 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -343,8 +343,8 @@ define float @v_neg_rsq_neg_f32(float %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -367,8 +367,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg <2 x float> %val
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -387,8 +387,8 @@ define float @v_neg_rsq_neg_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.neg = fneg float %val0
- %sqrt = call contract float @llvm.sqrt.f32(float %val0.neg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0.neg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -412,8 +412,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32_foldable_user(<2 x float> %val0, <2 x fl
; GCN-IEEE-NEXT: v_mul_f32_e64 v1, -v1, v3
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.fneg = fneg <2 x float> %val0
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -432,8 +432,8 @@ define float @v_neg_rsq_f32(float %val) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -455,8 +455,8 @@ define <2 x float> @v_neg_rsq_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -474,8 +474,8 @@ define float @v_neg_rsq_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -643,8 +643,8 @@ define <2 x float> @v_neg_rsq_v2f32_foldable_user(<2 x float> %val0, <2 x float>
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v0, v0, v2
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v1, v1, v3
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -672,8 +672,8 @@ define float @v_rsq_f32(float %val) {
; GCN-IEEE-SAFE-NEXT: v_cndmask_b32_e64 v1, 0, 12, vcc
; GCN-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v0, v1
; GCN-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -756,9 +756,9 @@ define { float, float } @v_rsq_f32_multi_use(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v2, vcc, 0, v2
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v1, v1, v2
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
%insert.0 = insertvalue { float, float } poison, float %sqrt, 0
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
%insert.1 = insertvalue { float, float } %insert.0, float %div, 1
ret { float, float } %insert.1
}
@@ -838,8 +838,8 @@ define float @v_rsq_f32_missing_contract0(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v0, vcc, 0, v0
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v1, v0
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -855,8 +855,8 @@ define float @v_rsq_f32_missing_contract1(float %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -876,8 +876,8 @@ define float @v_rsq_f32_contractable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -897,8 +897,8 @@ define float @v_rsq_f32_contractable_user_missing_contract0(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -918,8 +918,8 @@ define float @v_rsq_f32_contractable_user_missing_contract1(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd float %div, %val1
ret float %add
}
@@ -953,8 +953,8 @@ define float @v_rsq_f32_known_never_posdenormal(float nofpclass(psub) %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
index b78cbb0..4aac193 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
@@ -4504,7 +4504,7 @@ define <2 x double> @v_rsq_v2f64__afn_nnan_ninf(<2 x double> %x) {
ret <2 x double> %rsq
}
-define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
+define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) {
; SI-SDAG-LABEL: s_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0
@@ -4648,8 +4648,8 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
; VI-GISEL-NEXT: v_readfirstlane_b32 s0, v0
; VI-GISEL-NEXT: v_readfirstlane_b32 s1, v1
; VI-GISEL-NEXT: ; return to shader part epilog
- %rsq = call contract double @llvm.sqrt.f64(double %x)
- %result = fdiv contract double 1.0, %rsq
+ %rsq = call contract afn double @llvm.sqrt.f64(double %x)
+ %result = fdiv contract afn double 1.0, %rsq
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
@@ -4660,7 +4660,7 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
ret <2 x i32> %insert.1
}
-define double @v_rsq_f64_unsafe(double %x) #0 {
+define double @v_rsq_f64_unsafe(double %x) {
; SI-SDAG-LABEL: v_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4800,8 +4800,8 @@ define double @v_rsq_f64_unsafe(double %x) #0 {
; VI-GISEL-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], 1.0
; VI-GISEL-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[2:3]
; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call double @llvm.sqrt.f64(double %x)
- %rsq = fdiv double 1.0, %sqrt
+ %sqrt = call afn contract double @llvm.sqrt.f64(double %x)
+ %rsq = fdiv afn contract double 1.0, %sqrt
ret double %rsq
}
@@ -5737,7 +5737,6 @@ define double @v_div_const_contract_sqrt_f64(double %x) {
ret double %rsq
}
-attributes #0 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
; GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/udivrem24.ll b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
index 5477d62..1e5ec59 100644
--- a/llvm/test/CodeGen/AMDGPU/udivrem24.ll
+++ b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
@@ -1,18 +1,103 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s
-; FUNC-LABEL: {{^}}udiv24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -21,17 +106,101 @@ define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: udiv24_i8_denorm_flush_in_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -40,17 +209,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out,
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; SI-LABEL: udiv24_i8_denorm_flush_in:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -59,17 +312,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #2 {
+; SI-LABEL: udiv24_i8_denorm_flush_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -78,17 +415,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -97,17 +518,85 @@ define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv23_i32:
-; SI: v_cvt_f32_u32
-; SI-DAG: v_cvt_f32_u32
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -120,11 +609,88 @@ define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -137,11 +703,88 @@ define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u23_u24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u23_u24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u23_u24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u23_u24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -154,11 +797,88 @@ define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u24_u23_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u24_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u24_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u24_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -171,14 +891,113 @@ define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}udiv25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -191,14 +1010,113 @@ define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -211,14 +1129,113 @@ define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -231,17 +1248,107 @@ define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}urem24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v3, v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v4, v1
+; VI-NEXT: v_mul_f32_e32 v3, v4, v3
+; VI-NEXT: v_trunc_f32_e32 v3, v3
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
+; VI-NEXT: v_mad_f32 v3, -v3, v2, v4
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -250,17 +1357,107 @@ define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}urem24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v4, v2
+; VI-NEXT: v_mul_f32_e32 v4, v3, v4
+; VI-NEXT: v_trunc_f32_e32 v4, v4
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v4
+; VI-NEXT: v_mad_f32 v3, -v4, v2, v3
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -269,10 +1466,90 @@ define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem24_i32:
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s6, s4, 0xffffff
+; SI-NEXT: s_and_b32 s7, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s6
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s7
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_mul_lo_u32 v0, v0, s5
+; SI-NEXT: v_sub_i32_e32 v0, vcc, s4, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s2, s5, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s2
+; VI-NEXT: s_and_b32 s2, s4, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v0, s5
+; VI-NEXT: v_sub_u32_e32 v0, vcc, s4, v0
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -285,14 +1562,105 @@ define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -305,14 +1673,105 @@ define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0xffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -325,14 +1784,105 @@ define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -345,12 +1895,85 @@ define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u16_u23_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u16_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u16_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u16_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -363,12 +1986,85 @@ define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr ad
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u23_u16_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u23_u16_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u23_u16_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u23_u16_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u23_u16_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
new file mode 100644
index 0000000..ea12732
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
@@ -0,0 +1,16 @@
+; RUN: not llc -mcpu=gfx1100 -mtriple=amdgcn-amd-amdhsa -stress-regalloc=4 -filetype=null -verify-machineinstrs %s 2>&1 | FileCheck %s
+
+; CHECK: error: <unknown>:0:0: ran out of registers during register allocation in function 'f'
+; CHECK-NOT: Bad machine code
+
+define <16 x half> @f(i1 %LGV2, <16 x half> %0) {
+BB:
+ br i1 %LGV2, label %SW_C3, label %SW_C
+
+SW_C: ; preds = %BB
+ %B1 = fmul <16 x half> %0, zeroinitializer
+ ret <16 x half> %B1
+
+SW_C3: ; preds = %BB
+ ret <16 x half> <half 0xH0000, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison>
+}
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
index aea2a8b..f2ecfe8 100644
--- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s
define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_i8_i32:
; GFX950: ; %bb.0:
@@ -13,6 +14,20 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_i8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_i8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
@@ -40,6 +55,20 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_u8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_u8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
diff --git a/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..8f7b050
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+  ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..05e1e8b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..a65e5c5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
index cda1e83..aa3cdc3 100644
--- a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
+++ b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
@@ -1,10 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-;CHECK: _foo
-;CHECK-NOT: vld1.32
-;CHECK-NOT: vst1.32
-;CHECK: bx
define void @foo(ptr %J) {
+; CHECK-LABEL: foo:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
%A = load <16 x i8>, ptr %J
%T1 = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%T2 = shufflevector <8 x i8> %T1, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
index 0f711630..4a65a53 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Descriptor Range type: Invalid
+; CHECK: error: Invalid Descriptor Range type
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
index 6551116..031dfca 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: NOTRootFlags
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
index 579528d..2739320 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: Invalid
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
entry:
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 7e7d56e..855e0c0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MaxLOD: 0
+; CHECK: error: Invalid value for MaxLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index d958f10..812749b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MinLOD: 0
+; CHECK: error: Invalid value for MinLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 34b27eb..6898aec 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MipLODBias: 666
+; CHECK: error: Invalid value for MipLODBias: 6.660000e+02
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
index b23366b..f5430df 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
@@ -1,20 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -verify-machineinstrs < %s | FileCheck %s
@.str = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, 3'RD STRING\00", align 1
@.str1 = private unnamed_addr constant [3 x i8] c"%s\00", align 1
-; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...)
; Function Attrs: nounwind
-define i32 @main() {
+define i32 @main() nounwind {
; CHECK-LABEL: main:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: .cfi_def_cfa r30, 8
-; CHECK-NEXT: .cfi_offset r31, -4
-; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK: // %bb.0: // %entry
; CHECK-NEXT: {
; CHECK-NEXT: r0 = ##.L.str1
; CHECK-NEXT: r3:2 = CONST64(#2325073635944967245)
@@ -53,5 +48,4 @@ entry:
ret i32 0
}
-; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
diff --git a/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..3f7590a
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,91 @@
+;; Test MIR printer and parser for type id field in call site info. Test that
+;; it works well with/without --emit-call-site-info.
+
+;; Multiplex --call-graph-section and -emit-call-site-info as both utilize
+;; CallSiteInfo and callSites.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with --call-graph-section only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is not set, calleeTypeIds is set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section %s -stop-after=finalize-isel -o %t1.mir
+; RUN: cat %t1.mir | FileCheck %s --check-prefix=PRINTER_CGS
+; PRINTER_CGS: name: main
+; PRINTER_CGS: callSites:
+; PRINTER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PRINTER_CGS-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section %t1.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS
+; PARSER_CGS: name: main
+; PARSER_CGS: callSites:
+; PARSER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PARSER_CGS-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with -emit-call-site-info only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is set, calleeTypeIds is not set.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CSI
+; PRINTER_CSI: name: main
+; PRINTER_CSI: callSites:
+; PRINTER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PRINTER_CSI-NOT: calleeTypeIds:
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CSI
+; PARSER_CSI: name: main
+; PARSER_CSI: callSites:
+; PARSER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PARSER_CSI-NOT: calleeTypeIds:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with both -emit-call-site-info and --call-graph-section.
+
+;; Test printer.
+;; Verify both fwdArgRegs and calleeTypeIds are set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CGS_CSI
+; PRINTER_CGS_CSI: name: main
+; PRINTER_CGS_CSI: callSites:
+; PRINTER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PRINTER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS_CSI
+; PARSER_CGS_CSI: name: main
+; PARSER_CGS_CSI: callSites:
+; PARSER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PARSER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..a66a884
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..e7f162c
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..9f5e858
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/NVPTX/bug26185-2.ll b/llvm/test/CodeGen/NVPTX/bug26185-2.ll
index 4e11f58..46172b1 100644
--- a/llvm/test/CodeGen/NVPTX/bug26185-2.ll
+++ b/llvm/test/CodeGen/NVPTX/bug26185-2.ll
@@ -16,7 +16,7 @@ define ptx_kernel void @spam(ptr addrspace(1) noalias nocapture readonly %arg, p
; CHECK: .maxntid 1, 1, 1
; CHECK-NEXT: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %bb
; CHECK-NEXT: ld.param.b64 %rd1, [spam_param_0];
@@ -25,10 +25,9 @@ define ptx_kernel void @spam(ptr addrspace(1) noalias nocapture readonly %arg, p
; CHECK-NEXT: add.s64 %rd4, %rd1, %rd3;
; CHECK-NEXT: ld.param.b64 %rd5, [spam_param_1];
; CHECK-NEXT: ld.global.nc.s16 %r1, [%rd4+16];
-; CHECK-NEXT: mul.wide.s32 %rd6, %r1, %r1;
-; CHECK-NEXT: ld.global.b64 %rd7, [%rd5];
-; CHECK-NEXT: add.s64 %rd8, %rd6, %rd7;
-; CHECK-NEXT: st.global.b64 [%rd5], %rd8;
+; CHECK-NEXT: ld.global.b64 %rd6, [%rd5];
+; CHECK-NEXT: mad.wide.s32 %rd7, %r1, %r1, %rd6;
+; CHECK-NEXT: st.global.b64 [%rd5], %rd7;
; CHECK-NEXT: ret;
bb:
%tmp5 = add nsw i64 %arg3, 8
diff --git a/llvm/test/CodeGen/NVPTX/combine-wide.ll b/llvm/test/CodeGen/NVPTX/combine-wide.ll
new file mode 100644
index 0000000..ed4a2b6
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/combine-wide.ll
@@ -0,0 +1,1339 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -O1 | FileCheck %s --check-prefixes=CHECK,O1
+; RUN: llc < %s -O0 | FileCheck %s --check-prefixes=CHECK,O0
+
+target triple = "nvptx64-nvidia-cuda"
+
+define i64 @t1(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t1(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t1_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t1_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t1_param_2];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t1(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t1_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t1_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t1_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 %c, %sext
+ ret i64 %add
+}
+
+define i64 @t2(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t2(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t2_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t2_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t2_param_2];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t2(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t2_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t2_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t2_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd2, %rd1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 %sext, %c
+ ret i64 %add
+}
+
+define i64 @t3(i32 %a, i32 %b) {
+;
+; O1-LABEL: t3(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t3_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t3_param_1];
+; O1-NEXT: mad.wide.s32 %rd1, %r1, %r2, 1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t3(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t3_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t3_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: add.s64 %rd2, %rd1, 1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd2;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 1, %sext
+ ret i64 %add
+}
+
+define i64 @t4(i32 %a, i64 %c) {
+;
+; O1-LABEL: t4(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t4_param_0];
+; O1-NEXT: ld.param.b64 %rd1, [t4_param_1];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, 3, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t4(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t4_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t4_param_0];
+; O0-NEXT: mul.lo.s32 %r2, %r1, 3;
+; O0-NEXT: cvt.s64.s32 %rd2, %r2;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, 3
+ %sext = sext i32 %mul to i64
+ %add = add i64 %c, %sext
+ ret i64 %add
+}
+
+define i64 @t4_1(i32 %a, i64 %c) {
+;
+; O1-LABEL: t4_1(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t4_1_param_0];
+; O1-NEXT: mad.wide.s32 %rd1, %r1, 3, 5;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t4_1(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t4_1_param_0];
+; O0-NEXT: mul.lo.s32 %r2, %r1, 3;
+; O0-NEXT: cvt.s64.s32 %rd1, %r2;
+; O0-NEXT: add.s64 %rd2, %rd1, 5;
+; O0-NEXT: st.param.b64 [func_retval0], %rd2;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, 3
+ %sext = sext i32 %mul to i64
+ %add = add i64 5, %sext
+ ret i64 %add
+}
+
+define i64 @t5(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t5(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t5_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t5_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t5_param_2];
+; O1-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t5(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t5_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t5_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t5_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ %add = add i64 %c, %zext
+ ret i64 %add
+}
+
+define i64 @t6(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t6(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t6_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t6_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t6_param_2];
+; O1-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t6(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t6_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t6_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t6_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd2, %rd1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ %add = add i64 %zext, %c
+ ret i64 %add
+}
+
+define i32 @t7(i16 %a, i16 %b) {
+;
+; O1-LABEL: t7(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t7_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t7_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t7(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t7_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t7_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t8(i16 %a, i16 %b) {
+;
+; O1-LABEL: t8(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t8_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t8_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s32.s16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t8(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t8_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t8_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t9(i32 %a, i32 %b) {
+;
+; O1-LABEL: t9(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t9_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t9_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.u64.u32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t9(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t9_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t9_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t10(i32 %a, i32 %b) {
+;
+; O1-LABEL: t10(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t10_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t10_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t10(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t10_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t10_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t11(i16 %a, i16 %b) {
+;
+; O1-LABEL: t11(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t11_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t11_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t11(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t11_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t11_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t12(i16 %a, i16 %b) {
+;
+; O1-LABEL: t12(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t12_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t12_param_1];
+; O1-NEXT: mul.wide.s16 %r1, %rs1, %rs2;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t12(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t12_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t12_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t13(i32 %a, i32 %b) {
+;
+; O1-LABEL: t13(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t13_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t13_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.u64.u32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t13(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t13_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t13_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t14(i32 %a, i32 %b) {
+;
+; O1-LABEL: t14(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t14_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t14_param_1];
+; O1-NEXT: mul.wide.s32 %rd1, %r1, %r2;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t14(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t14_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t14_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t15(i16 %a, i16 %b) {
+;
+; O1-LABEL: t15(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t15_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t15_param_1];
+; O1-NEXT: mul.wide.u16 %r1, %rs1, %rs2;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t15(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t15_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t15_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t16(i16 %a, i16 %b) {
+;
+; O1-LABEL: t16(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t16_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t16_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s32.s16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t16(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t16_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t16_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t17(i32 %a, i32 %b) {
+;
+; O1-LABEL: t17(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t17_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t17_param_1];
+; O1-NEXT: mul.wide.u32 %rd1, %r1, %r2;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t17(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t17_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t17_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t18(i32 %a, i32 %b) {
+;
+; O1-LABEL: t18(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t18_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t18_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t18(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t18_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t18_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t19(i16 %a, i16 %b) {
+;
+; O1-LABEL: t19(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t19_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t19_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t19(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t19_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t19_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t20(i16 %a) {
+;
+; CHECK-LABEL: t20(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t20_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.s32.s16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t21(i32 %a) {
+;
+; CHECK-LABEL: t21(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t21_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.u64.u32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t22(i32 %a) {
+;
+; CHECK-LABEL: t22(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t22_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.s64.s32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t23(i16 %a, i16 %b) {
+;
+; CHECK-LABEL: t23(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t23_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.u32.u16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl nsw i16 %a, 4
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t24(i16 %a, i16 %b) {
+;
+; O1-LABEL: t24(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t24_param_0];
+; O1-NEXT: mul.wide.s16 %r1, %rs1, 16;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t24(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t24_param_0];
+; O0-NEXT: shl.b16 %rs2, %rs1, 4;
+; O0-NEXT: cvt.s32.s16 %r1, %rs2;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = shl nsw i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t25(i32 %a) {
+;
+; CHECK-LABEL: t25(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t25_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.u64.u32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl nsw i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t26(i32 %a) {
+;
+; O1-LABEL: t26(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t26_param_0];
+; O1-NEXT: mul.wide.s32 %rd1, %r1, 16;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t26(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t26_param_0];
+; O0-NEXT: shl.b32 %r2, %r1, 4;
+; O0-NEXT: cvt.s64.s32 %rd1, %r2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nsw i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t27(i16 %a, i16 %b) {
+;
+; O1-LABEL: t27(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t27_param_0];
+; O1-NEXT: mul.wide.u16 %r1, %rs1, 16;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t27(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t27_param_0];
+; O0-NEXT: shl.b16 %rs2, %rs1, 4;
+; O0-NEXT: cvt.u32.u16 %r1, %rs2;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = shl nuw i16 %a, 4
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t28(i16 %a, i16 %b) {
+;
+; CHECK-LABEL: t28(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t28_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.s32.s16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl nuw i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t29(i32 %a) {
+;
+; O1-LABEL: t29(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t29_param_0];
+; O1-NEXT: mul.wide.u32 %rd1, %r1, 16;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t29(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t29_param_0];
+; O0-NEXT: shl.b32 %r2, %r1, 4;
+; O0-NEXT: cvt.u64.u32 %rd1, %r2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nuw i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t30(i32 %a) {
+;
+; CHECK-LABEL: t30(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t30_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.s64.s32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl nuw i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i64 @t31(i32 %a, i32 %b) {
+;
+; O1-LABEL: t31(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t31_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t31_param_1];
+; O1-NEXT: shl.b32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t31(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t31_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t31_param_0];
+; O0-NEXT: shl.b32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nuw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t32(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t32(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t32_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t32_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t32_param_2];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t32(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t32_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t32_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t32_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t33(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t33(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t33_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t33_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t33_param_2];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t33(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t33_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t33_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t33_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t34(i16 %a, i16 %b) {
+;
+; O1-LABEL: t34(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t34_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t34_param_1];
+; O1-NEXT: mad.wide.s16 %r1, %rs1, %rs2, 1;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t34(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t34_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t34_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: add.s32 %r2, %r1, 1;
+; O0-NEXT: st.param.b32 [func_retval0], %r2;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 1, %sext
+ ret i32 %add
+}
+
+define i32 @t35(i16 %a, i32 %c) {
+;
+; O1-LABEL: t35(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t35_param_0];
+; O1-NEXT: ld.param.b32 %r1, [t35_param_1];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, 3, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t35(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t35_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t35_param_0];
+; O0-NEXT: mul.lo.s16 %rs2, %rs1, 3;
+; O0-NEXT: cvt.s32.s16 %r2, %rs2;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, 3
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t36(i16 %a, i32 %c) {
+;
+; O1-LABEL: t36(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t36_param_0];
+; O1-NEXT: mad.wide.s16 %r1, %rs1, 3, 5;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t36(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t36_param_0];
+; O0-NEXT: mul.lo.s16 %rs2, %rs1, 3;
+; O0-NEXT: cvt.s32.s16 %r1, %rs2;
+; O0-NEXT: add.s32 %r2, %r1, 5;
+; O0-NEXT: st.param.b32 [func_retval0], %r2;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, 3
+ %sext = sext i16 %mul to i32
+ %add = add i32 5, %sext
+ ret i32 %add
+}
+
+define i32 @t37(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t37(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t37_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t37_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t37_param_2];
+; O1-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t37(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t37_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t37_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t37_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ %add = add i32 %c, %zext
+ ret i32 %add
+}
+
+define i32 @t38(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t38(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t38_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t38_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t38_param_2];
+; O1-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t38(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t38_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t38_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t38_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r2, %r1;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ %add = add i32 %zext, %c
+ ret i32 %add
+}
+
+define i64 @t39(i16 %a, i16 %b) {
+; O1-LABEL: t39(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t39_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t39_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t39(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t39_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t39_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t40(i16 %a, i16 %b) {
+; O1-LABEL: t40(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t40_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t40_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t40(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t40_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t40_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t41(i16 %a, i16 %b) {
+; O1-LABEL: t41(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t41_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t41_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s64.s16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t41(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t41_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t41_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s64.s16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t42(i16 %a, i16 %b, ptr %ptr) {
+; O1-LABEL: t42(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t42_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t42_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: ld.param.b64 %rd1, [t42_param_2];
+; O1-NEXT: st.b16 [%rd1], %rs3;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t42(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t42_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t42_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t42_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: st.b16 [%rd1], %rs3;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ store i16 %mul, ptr %ptr
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t43(i16 %a, i16 %b, i32 %c, ptr %ptr) {
+; O1-LABEL: t43(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t43_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t43_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: ld.param.b64 %rd1, [t43_param_3];
+; O1-NEXT: st.b16 [%rd1], %rs3;
+; O1-NEXT: ld.param.b32 %r1, [t43_param_2];
+; O1-NEXT: cvt.u32.u16 %r2, %rs3;
+; O1-NEXT: add.s32 %r3, %r2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r3;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t43(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t43_param_3];
+; O0-NEXT: ld.param.b32 %r1, [t43_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t43_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t43_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: st.b16 [%rd1], %rs3;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r2, %r1;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ store i16 %mul, ptr %ptr
+ %zext = zext i16 %mul to i32
+ %add = add i32 %zext, %c
+ ret i32 %add
+}
diff --git a/llvm/test/CodeGen/NVPTX/fold-movs.ll b/llvm/test/CodeGen/NVPTX/fold-movs.ll
new file mode 100644
index 0000000..6ee0fb2
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fold-movs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK-F32X2
+; RUN: %if ptxas-12.7 %{ \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+target triple = "nvptx64-nvidia-cuda"
+
+; Since fdiv doesn't support f32x2, this will create BUILD_VECTORs that will be
+; folded into the store, turning it into st.global.v8.b32.
+define void @writevec(<8 x float> %v1, <8 x float> %v2, ptr addrspace(1) %p) {
+; CHECK-F32X2-LABEL: writevec(
+; CHECK-F32X2: {
+; CHECK-F32X2-NEXT: .reg .b32 %r<25>;
+; CHECK-F32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT: // %bb.0:
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [writevec_param_0];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [writevec_param_0+16];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [writevec_param_1+16];
+; CHECK-F32X2-NEXT: div.rn.f32 %r13, %r8, %r12;
+; CHECK-F32X2-NEXT: div.rn.f32 %r14, %r7, %r11;
+; CHECK-F32X2-NEXT: div.rn.f32 %r15, %r6, %r10;
+; CHECK-F32X2-NEXT: div.rn.f32 %r16, %r5, %r9;
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r17, %r18, %r19, %r20}, [writevec_param_1];
+; CHECK-F32X2-NEXT: div.rn.f32 %r21, %r4, %r20;
+; CHECK-F32X2-NEXT: div.rn.f32 %r22, %r3, %r19;
+; CHECK-F32X2-NEXT: div.rn.f32 %r23, %r2, %r18;
+; CHECK-F32X2-NEXT: div.rn.f32 %r24, %r1, %r17;
+; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [writevec_param_2];
+; CHECK-F32X2-NEXT: st.global.v8.b32 [%rd1], {%r24, %r23, %r22, %r21, %r16, %r15, %r14, %r13};
+; CHECK-F32X2-NEXT: ret;
+ %v = fdiv <8 x float> %v1, %v2
+ store <8 x float> %v, ptr addrspace(1) %p, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 06c2cc8..26336b8 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -343,61 +343,77 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smax(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smax_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smax_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smax(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smax_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smax_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sgt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -473,61 +489,77 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smin(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smin_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smin_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smin(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smin_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smin_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sle <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
diff --git a/llvm/test/CodeGen/NVPTX/ld-param-sink.ll b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
new file mode 100644
index 0000000..03523a3
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare ptr @bar(i64)
+declare i64 @baz()
+
+define ptr @foo(i1 %cond) {
+; CHECK-LABEL: foo(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.b8 %rs1, [foo_param_0];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: call.uni (retval0), baz, ();
+; CHECK-NEXT: ld.param.b64 %rd2, [retval0];
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: @%p1 bra $L__BB0_2;
+; CHECK-NEXT: // %bb.1: // %bb
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: call.uni (retval0), bar, (param0);
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: $L__BB0_2: // %common.ret
+; CHECK-NEXT: st.param.b64 [func_retval0], 0;
+; CHECK-NEXT: ret;
+entry:
+ %call = call i64 @baz()
+ br i1 %cond, label %common.ret, label %bb
+
+bb:
+ %tmp = call ptr @bar(i64 %call)
+ br label %common.ret
+
+common.ret:
+ ret ptr null
+}
diff --git a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
index 5c30173..ae069cf 100644
--- a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
+++ b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
@@ -114,15 +114,14 @@ define void @foo3(i32 %a) {
; PTX64-NEXT: .reg .b64 %SP;
; PTX64-NEXT: .reg .b64 %SPL;
; PTX64-NEXT: .reg .b32 %r<2>;
-; PTX64-NEXT: .reg .b64 %rd<5>;
+; PTX64-NEXT: .reg .b64 %rd<4>;
; PTX64-EMPTY:
; PTX64-NEXT: // %bb.0:
; PTX64-NEXT: mov.b64 %SPL, __local_depot2;
; PTX64-NEXT: ld.param.b32 %r1, [foo3_param_0];
; PTX64-NEXT: add.u64 %rd2, %SPL, 0;
-; PTX64-NEXT: mul.wide.s32 %rd3, %r1, 4;
-; PTX64-NEXT: add.s64 %rd4, %rd2, %rd3;
-; PTX64-NEXT: st.local.b32 [%rd4], %r1;
+; PTX64-NEXT: mad.wide.s32 %rd3, %r1, 4, %rd2;
+; PTX64-NEXT: st.local.b32 [%rd3], %r1;
; PTX64-NEXT: ret;
%local = alloca [3 x i32], align 4
%1 = getelementptr inbounds i32, ptr %local, i32 %a
diff --git a/llvm/test/CodeGen/NVPTX/vector-loads.ll b/llvm/test/CodeGen/NVPTX/vector-loads.ll
index e16fc74..6f0dff7 100644
--- a/llvm/test/CodeGen/NVPTX/vector-loads.ll
+++ b/llvm/test/CodeGen/NVPTX/vector-loads.ll
@@ -154,7 +154,7 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<4>;
; CHECK-NEXT: .reg .b32 %r<8>;
-; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [foo_complex_param_0];
@@ -166,12 +166,11 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
; CHECK-NEXT: shl.b32 %r6, %r1, 1;
; CHECK-NEXT: or.b32 %r7, %r5, %r6;
; CHECK-NEXT: cvt.u64.u32 %rd2, %r7;
-; CHECK-NEXT: mul.wide.u32 %rd3, %r3, 131072;
-; CHECK-NEXT: add.s64 %rd4, %rd1, %rd3;
-; CHECK-NEXT: add.s64 %rd5, %rd4, %rd2;
-; CHECK-NEXT: ld.v2.b8 {%rs1, %rs2}, [%rd5+128];
+; CHECK-NEXT: mad.wide.u32 %rd3, %r3, 131072, %rd1;
+; CHECK-NEXT: add.s64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: ld.v2.b8 {%rs1, %rs2}, [%rd4+128];
; CHECK-NEXT: max.u16 %rs3, %rs1, %rs2;
-; CHECK-NEXT: st.b8 [%rd5+129], %rs3;
+; CHECK-NEXT: st.b8 [%rd4+129], %rs3;
; CHECK-NEXT: ret;
%t0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !1
%t1 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
diff --git a/llvm/test/CodeGen/PowerPC/aix-lower-arbitrary-sized-ints.ll b/llvm/test/CodeGen/PowerPC/aix-lower-arbitrary-sized-ints.ll
new file mode 100644
index 0000000..c119da6
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-lower-arbitrary-sized-ints.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --verify-machineinstrs -mtriple powerpc-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | \
+; RUN: FileCheck %s --check-prefixes=CHECK,CHECK32
+; RUN: llc --verify-machineinstrs -mtriple powerpc64-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | \
+; RUN: FileCheck %s --check-prefixes=CHECK,CHECK64
+
+define ptr @lower_args(ptr %_0, i32 %0, i32 %1, i32 %2, i32 %3, ptr %4, ptr %5, i64 %6, i24 %7) {
+; CHECK-LABEL: lower_args:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: blr
+entry:
+ ret ptr %_0
+}
+
+define i32 @lower_args_withops_zeroext(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i24 %i) {
+; CHECK32-LABEL: lower_args_withops_zeroext:
+; CHECK32: # %bb.0: # %entry
+; CHECK32-NEXT: lwz r3, 56(r1)
+; CHECK32-NEXT: addi r3, r3, 255
+; CHECK32-NEXT: clrlwi r3, r3, 8
+; CHECK32-NEXT: blr
+;
+; CHECK64-LABEL: lower_args_withops_zeroext:
+; CHECK64: # %bb.0: # %entry
+; CHECK64-NEXT: lwz r3, 116(r1)
+; CHECK64-NEXT: addi r3, r3, 255
+; CHECK64-NEXT: clrldi r3, r3, 40
+; CHECK64-NEXT: blr
+entry:
+ %0 = add i24 %i, 255
+ %1 = zext i24 %0 to i32
+ ret i32 %1
+}
+
+define i32 @lower_args_withops_signext(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i24 signext %i) {
+; CHECK32-LABEL: lower_args_withops_signext:
+; CHECK32: # %bb.0: # %entry
+; CHECK32-NEXT: lwz r3, 56(r1)
+; CHECK32-NEXT: slwi r3, r3, 8
+; CHECK32-NEXT: srawi r3, r3, 8
+; CHECK32-NEXT: slwi r3, r3, 8
+; CHECK32-NEXT: addi r3, r3, 22272
+; CHECK32-NEXT: srawi r3, r3, 8
+; CHECK32-NEXT: blr
+;
+; CHECK64-LABEL: lower_args_withops_signext:
+; CHECK64: # %bb.0: # %entry
+; CHECK64-NEXT: lwz r3, 116(r1)
+; CHECK64-NEXT: slwi r3, r3, 8
+; CHECK64-NEXT: srawi r3, r3, 8
+; CHECK64-NEXT: addi r3, r3, 87
+; CHECK64-NEXT: sldi r3, r3, 40
+; CHECK64-NEXT: sradi r3, r3, 40
+; CHECK64-NEXT: blr
+entry:
+ %0 = add i24 %i, 87
+ %1 = sext i24 %0 to i32
+ ret i32 %1
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
index afc7a39..aae2326 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
@@ -750,21 +750,25 @@ entry:
define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) {
; CHECK-64-LABEL: testDoubleImm1:
; CHECK-64: # %bb.0: # %entry
+; CHECK-64-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-64-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-64-NEXT: blr
;
; CHECK-32-LABEL: testDoubleImm1:
; CHECK-32: # %bb.0: # %entry
+; CHECK-32-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-32-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-32-NEXT: blr
;
; CHECK-64-P10-LABEL: testDoubleImm1:
; CHECK-64-P10: # %bb.0: # %entry
+; CHECK-64-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-64-P10-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-64-P10-NEXT: blr
;
; CHECK-32-P10-LABEL: testDoubleImm1:
; CHECK-32-P10: # %bb.0: # %entry
+; CHECK-32-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-32-P10-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
index 9dd0fbe..10fc308 100644
--- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1757,7 +1757,11 @@ entry:
define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
; P9BE-LABEL: fromRegsConvdtoi:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9BE-NEXT: xxmrghd vs0, vs2, vs4
+; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xvcvdpsxws v2, vs0
; P9BE-NEXT: xxmrghd vs0, vs1, vs3
; P9BE-NEXT: xvcvdpsxws v3, vs0
@@ -1766,7 +1770,11 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P9LE-LABEL: fromRegsConvdtoi:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs3, vs1
+; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9LE-NEXT: xvcvdpsxws v2, vs0
; P9LE-NEXT: xxmrghd vs0, vs4, vs2
; P9LE-NEXT: xvcvdpsxws v3, vs0
@@ -1775,6 +1783,10 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P8BE-LABEL: fromRegsConvdtoi:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs2, vs4
; P8BE-NEXT: xxmrghd vs1, vs1, vs3
; P8BE-NEXT: xvcvdpsxws v2, vs0
@@ -1784,6 +1796,10 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P8LE-LABEL: fromRegsConvdtoi:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs3, vs1
; P8LE-NEXT: xxmrghd vs1, vs4, vs2
; P8LE-NEXT: xvcvdpsxws v2, vs0
@@ -3246,7 +3262,11 @@ entry:
define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) {
; P9BE-LABEL: fromRegsConvdtoui:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9BE-NEXT: xxmrghd vs0, vs2, vs4
+; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xvcvdpuxws v2, vs0
; P9BE-NEXT: xxmrghd vs0, vs1, vs3
; P9BE-NEXT: xvcvdpuxws v3, vs0
@@ -3255,7 +3275,11 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P9LE-LABEL: fromRegsConvdtoui:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs3, vs1
+; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9LE-NEXT: xvcvdpuxws v2, vs0
; P9LE-NEXT: xxmrghd vs0, vs4, vs2
; P9LE-NEXT: xvcvdpuxws v3, vs0
@@ -3264,6 +3288,10 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P8BE-LABEL: fromRegsConvdtoui:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs2, vs4
; P8BE-NEXT: xxmrghd vs1, vs1, vs3
; P8BE-NEXT: xvcvdpuxws v2, vs0
@@ -3273,6 +3301,10 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P8LE-LABEL: fromRegsConvdtoui:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs3, vs1
; P8LE-NEXT: xxmrghd vs1, vs4, vs2
; P8LE-NEXT: xvcvdpuxws v2, vs0
@@ -4546,24 +4578,32 @@ entry:
define <2 x i64> @fromRegsConvdtoll(double %a, double %b) {
; P9BE-LABEL: fromRegsConvdtoll:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xxmrghd vs0, vs1, vs2
; P9BE-NEXT: xvcvdpsxds v2, vs0
; P9BE-NEXT: blr
;
; P9LE-LABEL: fromRegsConvdtoll:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs2, vs1
; P9LE-NEXT: xvcvdpsxds v2, vs0
; P9LE-NEXT: blr
;
; P8BE-LABEL: fromRegsConvdtoll:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs1, vs2
; P8BE-NEXT: xvcvdpsxds v2, vs0
; P8BE-NEXT: blr
;
; P8LE-LABEL: fromRegsConvdtoll:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs2, vs1
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
@@ -5700,24 +5740,32 @@ entry:
define <2 x i64> @fromRegsConvdtoull(double %a, double %b) {
; P9BE-LABEL: fromRegsConvdtoull:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xxmrghd vs0, vs1, vs2
; P9BE-NEXT: xvcvdpuxds v2, vs0
; P9BE-NEXT: blr
;
; P9LE-LABEL: fromRegsConvdtoull:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs2, vs1
; P9LE-NEXT: xvcvdpuxds v2, vs0
; P9LE-NEXT: blr
;
; P8BE-LABEL: fromRegsConvdtoull:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs1, vs2
; P8BE-NEXT: xvcvdpuxds v2, vs0
; P8BE-NEXT: blr
;
; P8LE-LABEL: fromRegsConvdtoull:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs2, vs1
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
index 7f6fdc7..b40fbc3 100644
--- a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
+++ b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
@@ -562,6 +562,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P8-NEXT: bl dummy
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: xxlxor f0, f0, f0
+; CHECK-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P8-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P8-NEXT: xxswapd vs0, vs0
; CHECK-P8-NEXT: stxvd2x vs0, 0, r30
@@ -576,6 +577,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P9-NEXT: bl dummy
; CHECK-P9-NEXT: nop
; CHECK-P9-NEXT: xxlxor f0, f0, f0
+; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P9-NEXT: stxv vs0, 0(r30)
;
@@ -589,6 +591,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P9-BE-NEXT: bl dummy
; CHECK-P9-BE-NEXT: nop
; CHECK-P9-BE-NEXT: xxlxor f0, f0, f0
+; CHECK-P9-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-BE-NEXT: xxmrghd vs0, vs0, vs1
; CHECK-P9-BE-NEXT: stxv vs0, 0(r30)
;
@@ -615,6 +618,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P7-NEXT: bl dummy
; CHECK-P7-NEXT: nop
; CHECK-P7-NEXT: xxlxor f0, f0, f0
+; CHECK-P7-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P7-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P7-NEXT: xxswapd vs0, vs0
; CHECK-P7-NEXT: stxvd2x vs0, 0, r30
@@ -629,6 +633,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; P8-AIX-64-NEXT: bl .dummy[PR]
; P8-AIX-64-NEXT: nop
; P8-AIX-64-NEXT: xxlxor f0, f0, f0
+; P8-AIX-64-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-AIX-64-NEXT: xxmrghd vs0, vs0, vs1
; P8-AIX-64-NEXT: stxvd2x vs0, 0, r31
;
@@ -642,6 +647,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; P8-AIX-32-NEXT: bl .dummy[PR]
; P8-AIX-32-NEXT: nop
; P8-AIX-32-NEXT: xxlxor f0, f0, f0
+; P8-AIX-32-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-AIX-32-NEXT: xxmrghd vs0, vs0, vs1
; P8-AIX-32-NEXT: stxvd2x vs0, 0, r31
test_entry:
diff --git a/llvm/test/CodeGen/PowerPC/combine-fneg.ll b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
index 04af094..a72abf7 100644
--- a/llvm/test/CodeGen/PowerPC/combine-fneg.ll
+++ b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
@@ -6,6 +6,7 @@ define <4 x double> @fneg_fdiv_splat(double %a0, <4 x double> %a1) {
; CHECK-LABEL: fneg_fdiv_splat:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxspltd 0, 1, 0
; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l
; CHECK-NEXT: xvredp 1, 0
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
index eac4fb6..4519cf4 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -229,6 +229,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P8-NEXT: xscvspdpn f1, vs0
; P8-NEXT: bl nearbyintf
; P8-NEXT: nop
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, vs1, v30
; P8-NEXT: xscvspdpn f1, v31
; P8-NEXT: xvcvdpsp v29, vs0
@@ -239,6 +240,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P8-NEXT: xscvspdpn f1, vs0
; P8-NEXT: bl nearbyintf
; P8-NEXT: nop
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, v30, vs1
; P8-NEXT: li r3, 160
; P8-NEXT: xvcvdpsp v2, vs0
@@ -276,6 +278,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P9-NEXT: xscvspdpn f1, vs0
; P9-NEXT: bl nearbyintf
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd vs0, vs1, v30
; P9-NEXT: xscvspdpn f1, v31
; P9-NEXT: xvcvdpsp v29, vs0
@@ -286,6 +289,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P9-NEXT: xscvspdpn f1, vs0
; P9-NEXT: bl nearbyintf
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd vs0, v30, vs1
; P9-NEXT: lxv v31, 64(r1) # 16-byte Folded Reload
; P9-NEXT: lxv v30, 48(r1) # 16-byte Folded Reload
@@ -326,6 +330,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric
; P8-NEXT: bl nearbyint
; P8-NEXT: nop
; P8-NEXT: li r3, 144
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd v2, v30, vs1
; P8-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; P8-NEXT: li r3, 128
@@ -354,6 +359,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric
; P9-NEXT: xxswapd vs1, v31
; P9-NEXT: bl nearbyint
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd v2, v30, vs1
; P9-NEXT: lxv v31, 48(r1) # 16-byte Folded Reload
; P9-NEXT: lxv v30, 32(r1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/frem.ll b/llvm/test/CodeGen/PowerPC/frem.ll
index 19b4b1c..21cb206 100644
--- a/llvm/test/CodeGen/PowerPC/frem.ll
+++ b/llvm/test/CodeGen/PowerPC/frem.ll
@@ -70,6 +70,7 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: xscvspdpn 2, 0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 0, 1, 61
; CHECK-NEXT: xscvspdpn 1, 62
; CHECK-NEXT: xscvspdpn 2, 63
@@ -83,6 +84,7 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: xscvspdpn 2, 0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 0, 61, 1
; CHECK-NEXT: lxv 63, 80(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 62, 64(1) # 16-byte Folded Reload
@@ -124,6 +126,7 @@ define <2 x double> @frem2x64(<2 x double> %a, <2 x double> %b) {
; CHECK-NEXT: xxswapd 2, 63
; CHECK-NEXT: bl fmod
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 34, 61, 1
; CHECK-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
index 238e200..3ae0b02 100644
--- a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
@@ -41,39 +41,47 @@ define void @test(ptr %p1, ptr %p2) nounwind {
; CHECK-NEXT: xxswapd 61, 63
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 56, 1
; CHECK-NEXT: xxlor 1, 59, 59
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 60, 60
; CHECK-NEXT: xxmrgld 59, 0, 56
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 60, 1
; CHECK-NEXT: xxlor 1, 62, 62
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 61, 61
; CHECK-NEXT: xxmrgld 62, 0, 60
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 61, 1
; CHECK-NEXT: xxlor 1, 63, 63
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 57, 57
; CHECK-NEXT: xxmrgld 63, 0, 61
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 61, 1
; CHECK-NEXT: xxlor 1, 58, 58
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
; CHECK-NEXT: li 3, 160
; CHECK-NEXT: stxvd2x 63, 30, 29
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: stxvd2x 62, 30, 28
; CHECK-NEXT: stxvd2x 59, 30, 27
diff --git a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
index 50f05cc..b83ac4a 100644
--- a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
+++ b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
@@ -666,6 +666,7 @@ define <4 x float> @test_extend32_vec4(ptr %p) #0 {
; P8-NEXT: bl __extendhfsf2
; P8-NEXT: nop
; P8-NEXT: li r3, 80
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, vs61, vs1
; P8-NEXT: xxmrghd vs1, vs63, vs62
; P8-NEXT: ld r30, 96(r1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/ldexp.ll b/llvm/test/CodeGen/PowerPC/ldexp.ll
index 8d7253b..23748bc 100644
--- a/llvm/test/CodeGen/PowerPC/ldexp.ll
+++ b/llvm/test/CodeGen/PowerPC/ldexp.ll
@@ -107,6 +107,7 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind {
; CHECK-NEXT: extsw r4, r3
; CHECK-NEXT: bl ldexpf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd vs0, vs1, v29
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: vextuwrx r3, r3, v31
@@ -123,6 +124,7 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind {
; CHECK-NEXT: xscvspdpn f1, vs0
; CHECK-NEXT: bl ldexpf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd vs0, vs1, v29
; CHECK-NEXT: lxv v31, 80(r1) # 16-byte Folded Reload
; CHECK-NEXT: lxv v30, 64(r1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/llvm.modf.ll b/llvm/test/CodeGen/PowerPC/llvm.modf.ll
index 1b137c7..203b3bd 100644
--- a/llvm/test/CodeGen/PowerPC/llvm.modf.ll
+++ b/llvm/test/CodeGen/PowerPC/llvm.modf.ll
@@ -294,6 +294,7 @@ define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
; CHECK-NEXT: addi r4, r1, 40
; CHECK-NEXT: bl modf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd v2, v30, vs1
; CHECK-NEXT: lfd f0, 32(r1)
; CHECK-NEXT: lfd f1, 40(r1)
diff --git a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
index 9f62477..af0942e 100644
--- a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
+++ b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
@@ -56,155 +56,153 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: .cfi_offset v29, -240
; CHECK-NEXT: .cfi_offset v30, -224
; CHECK-NEXT: .cfi_offset v31, -208
+; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 2, 728(1)
+; CHECK-NEXT: ld 14, 688(1)
+; CHECK-NEXT: ld 11, 704(1)
+; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 21, 5
+; CHECK-NEXT: lwa 5, 0(7)
+; CHECK-NEXT: ld 7, 720(1)
; CHECK-NEXT: std 22, 464(1) # 8-byte Folded Spill
; CHECK-NEXT: std 23, 472(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 22, 5
-; CHECK-NEXT: ld 5, 848(1)
+; CHECK-NEXT: mr 22, 6
+; CHECK-NEXT: ld 6, 848(1)
; CHECK-NEXT: addi 3, 3, 1
-; CHECK-NEXT: mr 11, 7
-; CHECK-NEXT: ld 23, 688(1)
-; CHECK-NEXT: ld 7, 728(1)
+; CHECK-NEXT: ld 15, 736(1)
; CHECK-NEXT: std 18, 432(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 440(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 18, 6
-; CHECK-NEXT: li 6, 9
; CHECK-NEXT: ld 19, 768(1)
-; CHECK-NEXT: ld 2, 760(1)
-; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
-; CHECK-NEXT: cmpldi 3, 9
-; CHECK-NEXT: ld 27, 816(1)
-; CHECK-NEXT: ld 26, 808(1)
-; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 15, 736(1)
-; CHECK-NEXT: lxv 39, 0(8)
+; CHECK-NEXT: ld 18, 760(1)
; CHECK-NEXT: std 30, 528(1) # 8-byte Folded Spill
; CHECK-NEXT: std 31, 536(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 30, 704(1)
-; CHECK-NEXT: lxv 38, 0(9)
-; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 21, 784(1)
+; CHECK-NEXT: ld 12, 696(1)
+; CHECK-NEXT: lxv 0, 0(9)
+; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 1, 0(8)
+; CHECK-NEXT: cmpldi 3, 9
+; CHECK-NEXT: ld 30, 824(1)
+; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 29, 840(1)
+; CHECK-NEXT: ld 28, 832(1)
+; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 23, 784(1)
; CHECK-NEXT: ld 20, 776(1)
; CHECK-NEXT: std 24, 480(1) # 8-byte Folded Spill
; CHECK-NEXT: std 25, 488(1) # 8-byte Folded Spill
-; CHECK-NEXT: iselgt 3, 3, 6
-; CHECK-NEXT: ld 6, 720(1)
+; CHECK-NEXT: ld 25, 800(1)
; CHECK-NEXT: ld 24, 792(1)
-; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 7, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 27, 816(1)
+; CHECK-NEXT: ld 26, 808(1)
+; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
+; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 17, 752(1)
+; CHECK-NEXT: extswsli 9, 5, 3
+; CHECK-NEXT: lxv 4, 0(14)
+; CHECK-NEXT: std 14, 32(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 12, 40(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 0, 5, 40
+; CHECK-NEXT: sldi 14, 5, 5
+; CHECK-NEXT: mulli 31, 5, 24
+; CHECK-NEXT: lxv 38, 0(2)
+; CHECK-NEXT: lxv 2, 0(11)
+; CHECK-NEXT: std 2, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 2, 5, 48
+; CHECK-NEXT: sldi 5, 5, 4
+; CHECK-NEXT: ld 16, 744(1)
+; CHECK-NEXT: lxv 5, 0(10)
+; CHECK-NEXT: std 6, 200(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 192(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 6, 712(1)
+; CHECK-NEXT: mr 10, 7
+; CHECK-NEXT: add 7, 14, 21
+; CHECK-NEXT: lxv 13, 0(19)
+; CHECK-NEXT: std 8, 48(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 6, 56(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 8, 11
+; CHECK-NEXT: li 11, 9
+; CHECK-NEXT: iselgt 3, 3, 11
; CHECK-NEXT: addi 3, 3, -2
-; CHECK-NEXT: lxv 6, 0(19)
-; CHECK-NEXT: lxv 11, 0(7)
-; CHECK-NEXT: std 5, 200(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 23, 40(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 6, 48(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 840(1)
-; CHECK-NEXT: lxv 12, 0(6)
-; CHECK-NEXT: rldicl 12, 3, 61, 3
+; CHECK-NEXT: rldicl 11, 3, 61, 3
+; CHECK-NEXT: lxv 3, 0(12)
+; CHECK-NEXT: lxv 40, 0(6)
+; CHECK-NEXT: std 18, 112(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 120(1) # 8-byte Folded Spill
+; CHECK-NEXT: add 19, 21, 5
+; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
+; CHECK-NEXT: lxv 39, 0(10)
+; CHECK-NEXT: addi 3, 7, 32
+; CHECK-NEXT: add 12, 31, 21
; CHECK-NEXT: std 20, 128(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 136(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 4, 0(21)
-; CHECK-NEXT: ld 25, 800(1)
-; CHECK-NEXT: lxv 33, 0(10)
-; CHECK-NEXT: lxv 32, 0(23)
-; CHECK-NEXT: lxv 36, 0(30)
-; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 17, 752(1)
-; CHECK-NEXT: ld 16, 744(1)
-; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 29, 712(1)
-; CHECK-NEXT: ld 28, 696(1)
-; CHECK-NEXT: std 8, 56(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 37, 0(28)
-; CHECK-NEXT: lxv 13, 0(29)
-; CHECK-NEXT: mr 8, 29
-; CHECK-NEXT: mr 9, 30
-; CHECK-NEXT: mr 10, 28
-; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 23, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 33, 0(15)
+; CHECK-NEXT: lxv 32, 0(16)
; CHECK-NEXT: std 26, 160(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 10, 0(15)
-; CHECK-NEXT: lxv 9, 0(16)
-; CHECK-NEXT: li 28, 1
-; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
-; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 8, 0(17)
-; CHECK-NEXT: lxv 7, 0(2)
+; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 37, 0(17)
+; CHECK-NEXT: lxv 36, 0(18)
+; CHECK-NEXT: std 30, 176(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 28, 184(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 12, 0(20)
+; CHECK-NEXT: lxv 11, 0(23)
+; CHECK-NEXT: add 20, 21, 9
; CHECK-NEXT: stfd 28, 560(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 29, 568(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 5, 0(20)
-; CHECK-NEXT: lxv 3, 0(24)
+; CHECK-NEXT: lxv 10, 0(24)
+; CHECK-NEXT: lxv 9, 0(25)
; CHECK-NEXT: stfd 30, 576(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 31, 584(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 2, 0(25)
-; CHECK-NEXT: lxv 1, 0(26)
+; CHECK-NEXT: lxv 8, 0(26)
+; CHECK-NEXT: lxv 7, 0(27)
+; CHECK-NEXT: addi 12, 12, 32
+; CHECK-NEXT: li 27, 0
+; CHECK-NEXT: mr 26, 21
; CHECK-NEXT: stxv 52, 208(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 53, 224(1) # 16-byte Folded Spill
-; CHECK-NEXT: lxv 0, 0(27)
+; CHECK-NEXT: lxv 6, 0(30)
+; CHECK-NEXT: lxv 41, 0(28)
+; CHECK-NEXT: addi 7, 11, 1
+; CHECK-NEXT: add 11, 0, 21
+; CHECK-NEXT: li 28, 1
; CHECK-NEXT: stxv 54, 240(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 55, 256(1) # 16-byte Folded Spill
+; CHECK-NEXT: lxv 43, 0(29)
+; CHECK-NEXT: lxv 42, 0(5)
; CHECK-NEXT: stxv 56, 272(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 57, 288(1) # 16-byte Folded Spill
+; CHECK-NEXT: addi 11, 11, 32
; CHECK-NEXT: stxv 58, 304(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 5, 192(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 832(1)
; CHECK-NEXT: stxv 59, 320(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 60, 336(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 61, 352(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 62, 368(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 63, 384(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
; CHECK-NEXT: std 16, 96(1) # 8-byte Folded Spill
; CHECK-NEXT: std 17, 104(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 2, 112(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 5, 184(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 824(1)
-; CHECK-NEXT: std 5, 176(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
-; CHECK-NEXT: lwa 5, 0(11)
-; CHECK-NEXT: li 27, 0
-; CHECK-NEXT: ld 7, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: mulli 6, 5, 40
-; CHECK-NEXT: sldi 0, 5, 4
-; CHECK-NEXT: extswsli 14, 5, 3
-; CHECK-NEXT: lxv 40, 0(7)
-; CHECK-NEXT: ld 7, 184(1) # 8-byte Folded Reload
-; CHECK-NEXT: add 31, 14, 22
-; CHECK-NEXT: add 11, 0, 22
-; CHECK-NEXT: mr 26, 22
-; CHECK-NEXT: addi 3, 11, 32
-; CHECK-NEXT: addi 11, 12, 1
-; CHECK-NEXT: mulli 12, 5, 48
-; CHECK-NEXT: addi 31, 31, 32
-; CHECK-NEXT: add 19, 22, 6
-; CHECK-NEXT: sldi 6, 5, 5
-; CHECK-NEXT: mulli 5, 5, 24
-; CHECK-NEXT: lxv 41, 0(7)
-; CHECK-NEXT: add 20, 22, 6
-; CHECK-NEXT: add 21, 22, 5
-; CHECK-NEXT: ld 5, 192(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 43, 0(5)
-; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 42, 0(5)
+; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_3: # %_loop_2_do_.lr.ph
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB0_4 Depth 2
-; CHECK-NEXT: maddld 5, 12, 27, 0
-; CHECK-NEXT: mr 6, 18
-; CHECK-NEXT: mr 29, 21
+; CHECK-NEXT: maddld 5, 2, 27, 0
+; CHECK-NEXT: mr 6, 22
; CHECK-NEXT: mr 30, 20
-; CHECK-NEXT: mr 2, 19
-; CHECK-NEXT: mtctr 11
-; CHECK-NEXT: add 25, 22, 5
-; CHECK-NEXT: maddld 5, 12, 27, 14
-; CHECK-NEXT: add 24, 22, 5
+; CHECK-NEXT: mr 29, 19
+; CHECK-NEXT: mtctr 7
+; CHECK-NEXT: add 25, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 14
+; CHECK-NEXT: add 24, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 31
+; CHECK-NEXT: add 23, 21, 5
; CHECK-NEXT: mr 5, 26
; CHECK-NEXT: .p2align 5
; CHECK-NEXT: .LBB0_4: # %_loop_2_do_
@@ -212,66 +210,66 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lxvp 34, 0(6)
; CHECK-NEXT: lxvp 44, 0(5)
-; CHECK-NEXT: xvmaddadp 39, 45, 35
-; CHECK-NEXT: lxvp 46, 0(24)
-; CHECK-NEXT: xvmaddadp 38, 47, 35
-; CHECK-NEXT: lxvp 48, 0(25)
-; CHECK-NEXT: lxvp 50, 0(29)
-; CHECK-NEXT: lxvp 62, 0(30)
-; CHECK-NEXT: lxvp 60, 0(2)
+; CHECK-NEXT: xvmaddadp 1, 45, 35
+; CHECK-NEXT: lxvp 46, 0(30)
+; CHECK-NEXT: xvmaddadp 0, 47, 35
+; CHECK-NEXT: lxvp 48, 0(29)
+; CHECK-NEXT: lxvp 50, 0(23)
+; CHECK-NEXT: lxvp 62, 0(24)
+; CHECK-NEXT: lxvp 60, 0(25)
; CHECK-NEXT: lxvp 58, 32(6)
; CHECK-NEXT: lxvp 56, 32(5)
-; CHECK-NEXT: lxvp 54, 32(24)
-; CHECK-NEXT: lxvp 52, 32(25)
-; CHECK-NEXT: lxvp 30, 32(29)
-; CHECK-NEXT: lxvp 28, 32(30)
-; CHECK-NEXT: lxvp 26, 32(2)
-; CHECK-NEXT: xvmaddadp 33, 49, 35
-; CHECK-NEXT: xvmaddadp 32, 51, 35
-; CHECK-NEXT: xvmaddadp 37, 63, 35
-; CHECK-NEXT: xvmaddadp 36, 61, 35
-; CHECK-NEXT: xvmaddadp 13, 44, 34
-; CHECK-NEXT: xvmaddadp 12, 46, 34
-; CHECK-NEXT: xvmaddadp 11, 48, 34
-; CHECK-NEXT: xvmaddadp 10, 50, 34
-; CHECK-NEXT: xvmaddadp 9, 62, 34
-; CHECK-NEXT: xvmaddadp 8, 60, 34
-; CHECK-NEXT: xvmaddadp 7, 57, 59
-; CHECK-NEXT: xvmaddadp 6, 55, 59
-; CHECK-NEXT: xvmaddadp 5, 53, 59
-; CHECK-NEXT: xvmaddadp 4, 31, 59
-; CHECK-NEXT: xvmaddadp 3, 29, 59
-; CHECK-NEXT: xvmaddadp 2, 27, 59
-; CHECK-NEXT: xvmaddadp 1, 56, 58
-; CHECK-NEXT: xvmaddadp 0, 54, 58
-; CHECK-NEXT: xvmaddadp 40, 52, 58
+; CHECK-NEXT: lxvp 54, 32(30)
+; CHECK-NEXT: lxvp 52, 32(29)
+; CHECK-NEXT: lxvp 30, 32(23)
+; CHECK-NEXT: lxvp 28, 32(24)
+; CHECK-NEXT: lxvp 26, 32(25)
+; CHECK-NEXT: xvmaddadp 5, 49, 35
+; CHECK-NEXT: xvmaddadp 4, 51, 35
+; CHECK-NEXT: xvmaddadp 3, 63, 35
+; CHECK-NEXT: xvmaddadp 2, 61, 35
+; CHECK-NEXT: xvmaddadp 40, 44, 34
+; CHECK-NEXT: xvmaddadp 39, 46, 34
+; CHECK-NEXT: xvmaddadp 38, 48, 34
+; CHECK-NEXT: xvmaddadp 33, 50, 34
+; CHECK-NEXT: xvmaddadp 32, 62, 34
+; CHECK-NEXT: xvmaddadp 37, 60, 34
+; CHECK-NEXT: xvmaddadp 36, 57, 59
+; CHECK-NEXT: xvmaddadp 13, 55, 59
+; CHECK-NEXT: xvmaddadp 12, 53, 59
+; CHECK-NEXT: xvmaddadp 11, 31, 59
+; CHECK-NEXT: xvmaddadp 10, 29, 59
+; CHECK-NEXT: xvmaddadp 9, 27, 59
+; CHECK-NEXT: xvmaddadp 8, 56, 58
+; CHECK-NEXT: xvmaddadp 7, 54, 58
+; CHECK-NEXT: xvmaddadp 6, 52, 58
; CHECK-NEXT: xvmaddadp 41, 30, 58
; CHECK-NEXT: xvmaddadp 43, 28, 58
; CHECK-NEXT: xvmaddadp 42, 26, 58
; CHECK-NEXT: addi 6, 6, 64
; CHECK-NEXT: addi 5, 5, 64
+; CHECK-NEXT: addi 30, 30, 64
+; CHECK-NEXT: addi 29, 29, 64
+; CHECK-NEXT: addi 23, 23, 64
; CHECK-NEXT: addi 24, 24, 64
; CHECK-NEXT: addi 25, 25, 64
-; CHECK-NEXT: addi 29, 29, 64
-; CHECK-NEXT: addi 30, 30, 64
-; CHECK-NEXT: addi 2, 2, 64
; CHECK-NEXT: bdnz .LBB0_4
; CHECK-NEXT: # %bb.5: # %_loop_2_endl_
; CHECK-NEXT: #
; CHECK-NEXT: addi 28, 28, 6
-; CHECK-NEXT: add 26, 26, 12
-; CHECK-NEXT: add 31, 31, 12
-; CHECK-NEXT: add 19, 19, 12
-; CHECK-NEXT: add 3, 3, 12
-; CHECK-NEXT: add 20, 20, 12
-; CHECK-NEXT: add 21, 21, 12
+; CHECK-NEXT: add 26, 26, 2
+; CHECK-NEXT: add 20, 20, 2
+; CHECK-NEXT: add 11, 11, 2
+; CHECK-NEXT: add 19, 19, 2
+; CHECK-NEXT: add 3, 3, 2
+; CHECK-NEXT: add 12, 12, 2
; CHECK-NEXT: addi 27, 27, 1
; CHECK-NEXT: cmpld 28, 4
; CHECK-NEXT: ble 0, .LBB0_3
; CHECK-NEXT: # %bb.6: # %_loop_1_loopHeader_._return_bb_crit_edge.loopexit
-; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 63, 384(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 39, 0(3)
+; CHECK-NEXT: stxv 1, 0(3)
; CHECK-NEXT: ld 3, 64(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 62, 368(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 61, 352(1) # 16-byte Folded Reload
@@ -284,7 +282,7 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: lxv 54, 240(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 53, 224(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 52, 208(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 38, 0(3)
+; CHECK-NEXT: stxv 0, 0(3)
; CHECK-NEXT: ld 3, 72(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 31, 584(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 30, 576(1) # 8-byte Folded Reload
@@ -297,8 +295,8 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 29, 520(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 28, 512(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 27, 504(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 33, 0(3)
-; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: ld 3, 32(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 26, 496(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 25, 488(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 24, 480(1) # 8-byte Folded Reload
@@ -310,40 +308,41 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 18, 432(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 17, 424(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 16, 416(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 32, 0(3)
-; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 37, 0(10)
-; CHECK-NEXT: stxv 36, 0(9)
-; CHECK-NEXT: stxv 13, 0(8)
+; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 15, 408(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 14, 400(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 12, 0(3)
+; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 2, 0(8)
+; CHECK-NEXT: stxv 40, 0(3)
; CHECK-NEXT: ld 3, 80(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 11, 0(3)
+; CHECK-NEXT: stxv 39, 0(10)
+; CHECK-NEXT: stxv 38, 0(3)
; CHECK-NEXT: ld 3, 88(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 10, 0(3)
+; CHECK-NEXT: stxv 33, 0(3)
; CHECK-NEXT: ld 3, 96(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 9, 0(3)
+; CHECK-NEXT: stxv 32, 0(3)
; CHECK-NEXT: ld 3, 104(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 8, 0(3)
+; CHECK-NEXT: stxv 37, 0(3)
; CHECK-NEXT: ld 3, 112(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 7, 0(3)
+; CHECK-NEXT: stxv 36, 0(3)
; CHECK-NEXT: ld 3, 120(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 6, 0(3)
+; CHECK-NEXT: stxv 13, 0(3)
; CHECK-NEXT: ld 3, 128(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: stxv 12, 0(3)
; CHECK-NEXT: ld 3, 136(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: stxv 11, 0(3)
; CHECK-NEXT: ld 3, 144(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: stxv 10, 0(3)
; CHECK-NEXT: ld 3, 152(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 2, 0(3)
+; CHECK-NEXT: stxv 9, 0(3)
; CHECK-NEXT: ld 3, 160(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 1, 0(3)
+; CHECK-NEXT: stxv 8, 0(3)
; CHECK-NEXT: ld 3, 168(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 0, 0(3)
+; CHECK-NEXT: stxv 7, 0(3)
; CHECK-NEXT: ld 3, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 40, 0(3)
+; CHECK-NEXT: stxv 6, 0(3)
; CHECK-NEXT: ld 3, 184(1) # 8-byte Folded Reload
; CHECK-NEXT: stxv 41, 0(3)
; CHECK-NEXT: ld 3, 192(1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
index 799ba63..8fb4c21 100644
--- a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
+++ b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
@@ -40,9 +40,10 @@ define signext i32 @test(ptr noalias %PtrA, ptr noalias %PtrB, i32 signext %LenA
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_4: # %if.end9
; CHECK-NEXT: #
-; CHECK-NEXT: lwzx 10, 6, 9
+; CHECK-NEXT: add 9, 3, 9
+; CHECK-NEXT: lwz 10, 4(9)
; CHECK-NEXT: addi 10, 10, 1
-; CHECK-NEXT: stwx 10, 6, 9
+; CHECK-NEXT: stw 10, 4(9)
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_5: # %if.then
; CHECK-NEXT: lwax 3, 9, 3
diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index 291a9c1..b98aed8 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -940,21 +940,25 @@ entry:
define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) {
; CHECK-LABEL: testDoubleImm1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd v2, v2, vs1
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: testDoubleImm1:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-BE-NEXT: xxpermdi v2, vs1, v2, 1
; CHECK-BE-NEXT: blr
;
; CHECK-P9-LABEL: testDoubleImm1:
; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-NEXT: xxpermdi v2, vs1, v2, 1
; CHECK-P9-NEXT: blr
;
; AIX-P8-LABEL: testDoubleImm1:
; AIX-P8: # %bb.0: # %entry
+; AIX-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; AIX-P8-NEXT: xxpermdi v2, vs1, v2, 1
; AIX-P8-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
index 71c3069..5dac21b 100644
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -107,6 +107,10 @@ entry:
define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsdivdp 3, 3, 6
@@ -116,6 +120,10 @@ define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fdiv_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsdivdp 3, 3, 6
@@ -209,6 +217,7 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double>
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -239,6 +248,7 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -390,6 +400,7 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -431,6 +442,7 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double>
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -486,6 +498,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -498,6 +511,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -536,6 +550,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -546,6 +561,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
@@ -670,6 +686,10 @@ entry:
define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fmul_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsmuldp 3, 3, 6
@@ -679,6 +699,10 @@ define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fmul_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsmuldp 3, 3, 6
@@ -820,6 +844,10 @@ entry:
define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fadd_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsadddp 3, 3, 6
@@ -829,6 +857,10 @@ define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fadd_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsadddp 3, 3, 6
@@ -970,6 +1002,10 @@ entry:
define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fsub_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xssubdp 3, 3, 6
@@ -979,6 +1015,10 @@ define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fsub_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xssubdp 3, 3, 6
@@ -1105,6 +1145,8 @@ entry:
define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xssqrtdp 3, 3
; PC64LE-NEXT: xvsqrtdp 2, 0
@@ -1113,6 +1155,8 @@ define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_sqrt_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xssqrtdp 3, 3
; PC64LE9-NEXT: xvsqrtdp 2, 0
@@ -1203,6 +1247,7 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double>
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -1233,6 +1278,7 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -1384,6 +1430,7 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -1425,6 +1472,7 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double>
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -1480,6 +1528,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -1492,6 +1541,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -1530,6 +1580,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -1540,6 +1591,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
@@ -1618,6 +1670,7 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: ld 30, 80(1) # 8-byte Folded Reload
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
@@ -1647,6 +1700,7 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -1790,6 +1844,7 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: mr 4, 30
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: mr 4, 30
@@ -1828,6 +1883,7 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: mr 4, 30
@@ -1878,6 +1934,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: mr 4, 30
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: mr 4, 30
@@ -1890,6 +1947,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: ld 30, 96(1) # 8-byte Folded Reload
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
@@ -1923,6 +1981,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: mr 4, 30
@@ -1933,6 +1992,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2003,6 +2063,7 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2027,6 +2088,7 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2149,6 +2211,7 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl sin
@@ -2181,6 +2244,7 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl sin
@@ -2224,6 +2288,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl sin
@@ -2234,6 +2299,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2262,6 +2328,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl sin
@@ -2270,6 +2337,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2338,6 +2406,7 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2362,6 +2431,7 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2484,6 +2554,7 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl cos
@@ -2516,6 +2587,7 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl cos
@@ -2559,6 +2631,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl cos
@@ -2569,6 +2642,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2597,6 +2671,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl cos
@@ -2605,6 +2680,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2673,6 +2749,7 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2697,6 +2774,7 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2819,6 +2897,7 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl exp
@@ -2851,6 +2930,7 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl exp
@@ -2894,6 +2974,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl exp
@@ -2904,6 +2985,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2932,6 +3014,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl exp
@@ -2940,6 +3023,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3008,6 +3092,7 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3032,6 +3117,7 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3154,6 +3240,7 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl exp2
@@ -3186,6 +3273,7 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl exp2
@@ -3229,6 +3317,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl exp2
@@ -3239,6 +3328,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3267,6 +3357,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl exp2
@@ -3275,6 +3366,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3343,6 +3435,7 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3367,6 +3460,7 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3489,6 +3583,7 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log
@@ -3521,6 +3616,7 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log
@@ -3564,6 +3660,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log
@@ -3574,6 +3671,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3602,6 +3700,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log
@@ -3610,6 +3709,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3678,6 +3778,7 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3702,6 +3803,7 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3824,6 +3926,7 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log10
@@ -3856,6 +3959,7 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log10
@@ -3899,6 +4003,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log10
@@ -3909,6 +4014,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3937,6 +4043,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log10
@@ -3945,6 +4052,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4013,6 +4121,7 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -4037,6 +4146,7 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -4159,6 +4269,7 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log2
@@ -4191,6 +4302,7 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log2
@@ -4234,6 +4346,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log2
@@ -4244,6 +4357,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -4272,6 +4386,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log2
@@ -4280,6 +4395,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4387,6 +4503,8 @@ define <3 x float> @constrained_vector_rint_v3f32(<3 x float> %x) #0 {
define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_rint_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpic 3, 3
; PC64LE-NEXT: xvrdpic 2, 0
@@ -4395,6 +4513,8 @@ define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_rint_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpic 3, 3
; PC64LE9-NEXT: xvrdpic 2, 0
@@ -4479,6 +4599,7 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -4503,6 +4624,7 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -4625,6 +4747,7 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl nearbyint
@@ -4657,6 +4780,7 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl nearbyint
@@ -4700,6 +4824,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl nearbyint
@@ -4710,6 +4835,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -4738,6 +4864,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl nearbyint
@@ -4746,6 +4873,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4927,6 +5055,10 @@ define <3 x double> @constrained_vector_max_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: mflr 0
; PC64LE-NEXT: stdu 1, -64(1)
; PC64LE-NEXT: li 3, 48
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: std 0, 80(1)
@@ -4950,6 +5082,10 @@ define <3 x double> @constrained_vector_max_v3f64(<3 x double> %x, <3 x double>
; PC64LE9: # %bb.0: # %entry
; PC64LE9-NEXT: mflr 0
; PC64LE9-NEXT: stdu 1, -48(1)
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: std 0, 64(1)
@@ -5159,6 +5295,10 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: mflr 0
; PC64LE-NEXT: stdu 1, -64(1)
; PC64LE-NEXT: li 3, 48
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: std 0, 80(1)
@@ -5182,6 +5322,10 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double>
; PC64LE9: # %bb.0: # %entry
; PC64LE9-NEXT: mflr 0
; PC64LE9-NEXT: stdu 1, -48(1)
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: std 0, 64(1)
@@ -6520,6 +6664,8 @@ entry:
define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_ceil_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpip 3, 3
; PC64LE-NEXT: xvrdpip 2, 0
@@ -6528,6 +6674,8 @@ define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_ceil_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpip 3, 3
; PC64LE9-NEXT: xvrdpip 2, 0
@@ -6628,6 +6776,8 @@ entry:
define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_floor_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpim 3, 3
; PC64LE-NEXT: xvrdpim 2, 0
@@ -6636,6 +6786,8 @@ define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_floor_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpim 3, 3
; PC64LE9-NEXT: xvrdpim 2, 0
@@ -6736,6 +6888,8 @@ entry:
define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_round_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpi 3, 3
; PC64LE-NEXT: xvrdpi 2, 0
@@ -6744,6 +6898,8 @@ define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_round_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpi 3, 3
; PC64LE9-NEXT: xvrdpi 2, 0
@@ -6843,6 +6999,8 @@ entry:
define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_trunc_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpiz 3, 3
; PC64LE-NEXT: xvrdpiz 2, 0
@@ -6851,6 +7009,8 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_trunc_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpiz 3, 3
; PC64LE9-NEXT: xvrdpiz 2, 0
@@ -8049,6 +8209,7 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -8073,6 +8234,7 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -8195,6 +8357,7 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl tan
@@ -8227,6 +8390,7 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl tan
@@ -8270,6 +8434,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl tan
@@ -8280,6 +8445,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -8308,6 +8474,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl tan
@@ -8316,6 +8483,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -8390,6 +8558,7 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -8420,6 +8589,7 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -8571,6 +8741,7 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -8612,6 +8783,7 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -8667,6 +8839,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -8679,6 +8852,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -8717,6 +8891,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -8727,6 +8902,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..34493ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,33 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..6e1fe92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type operand bundle.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..1f91f41
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
@@ -0,0 +1,21 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type operand bundle.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 1263892..4091524 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -684,13 +684,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -698,25 +698,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -724,8 +724,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -733,7 +733,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -741,12 +741,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -754,8 +754,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -767,7 +767,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -779,13 +779,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -793,8 +793,8 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 12(a0)
; RV32-NEXT: sb zero, 13(a0)
@@ -814,7 +814,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 12(a0)
; RV64-NEXT: sb zero, 13(a0)
@@ -834,7 +834,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
@@ -842,7 +842,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -851,8 +851,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 28(a0)
; RV32-NEXT: sb zero, 29(a0)
@@ -888,7 +888,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 28(a0)
; RV64-NEXT: sb zero, 29(a0)
@@ -924,7 +924,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 16(a0)
; RV32-FAST-NEXT: sw zero, 20(a0)
@@ -936,7 +936,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -947,8 +947,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 60(a0)
; RV32-NEXT: sb zero, 61(a0)
@@ -1016,7 +1016,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 60(a0)
; RV64-NEXT: sb zero, 61(a0)
@@ -1084,7 +1084,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 48(a0)
; RV32-FAST-NEXT: sw zero, 52(a0)
@@ -1104,7 +1104,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 32(a0)
; RV64-FAST-NEXT: sd zero, 40(a0)
@@ -1121,13 +1121,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1135,13 +1135,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1149,14 +1149,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1165,8 +1165,8 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
@@ -1174,7 +1174,7 @@ define void @aligned_bzero_16(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1183,8 +1183,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 16(a0)
; RV32-BOTH-NEXT: sw zero, 20(a0)
@@ -1196,7 +1196,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1207,8 +1207,8 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 48(a0)
; RV32-BOTH-NEXT: sw zero, 52(a0)
@@ -1228,7 +1228,7 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 32(a0)
; RV64-BOTH-NEXT: sd zero, 40(a0)
@@ -1247,28 +1247,28 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
; Usual overlap tricks
-define void @aligned_bzero_7(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_7:
+define void @aligned_memset_zero_7(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_7:
; RV32: # %bb.0:
; RV32-NEXT: sw zero, 0(a0)
; RV32-NEXT: sh zero, 4(a0)
; RV32-NEXT: sb zero, 6(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_7:
+; RV64-LABEL: aligned_memset_zero_7:
; RV64: # %bb.0:
; RV64-NEXT: sw zero, 0(a0)
; RV64-NEXT: sh zero, 4(a0)
; RV64-NEXT: sb zero, 6(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_7:
+; RV32-FAST-LABEL: aligned_memset_zero_7:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 3(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_7:
+; RV64-FAST-LABEL: aligned_memset_zero_7:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 3(a0)
; RV64-FAST-NEXT: sw zero, 0(a0)
@@ -1277,8 +1277,8 @@ define void @aligned_bzero_7(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_15(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_15:
+define void @aligned_memset_zero_15(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_15:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 14(a0)
; RV32-NEXT: sw zero, 0(a0)
@@ -1287,7 +1287,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-NEXT: sh zero, 12(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_15:
+; RV64-LABEL: aligned_memset_zero_15:
; RV64: # %bb.0:
; RV64-NEXT: sd zero, 0(a0)
; RV64-NEXT: sw zero, 8(a0)
@@ -1295,7 +1295,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV64-NEXT: sb zero, 14(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_15:
+; RV32-FAST-LABEL: aligned_memset_zero_15:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 11(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
@@ -1303,7 +1303,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 8(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_15:
+; RV64-FAST-LABEL: aligned_memset_zero_15:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 7(a0)
; RV64-FAST-NEXT: sd zero, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 5747bbb..bd37443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -554,9 +554,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT: vmv.v.i v10, 0
; VLA-NEXT: vmv1r.v v0, v8
-; VLA-NEXT: vmerge.vim v8, v10, 1, v0
; VLA-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vmerge.vim v9, v10, 1, v0
; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT: vmsne.vi v0, v9, 0
; VLA-NEXT: ret
@@ -568,9 +567,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLS-NEXT: vmv.v.i v9, 0
; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vmv1r.v v0, v8
-; VLS-NEXT: vmerge.vim v8, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; VLS-NEXT: vmsne.vi v0, v10, 0
; VLS-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-load.ll
new file mode 100644
index 0000000..dd63fa0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-load.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv64 -mattr=+zve64x,+zvl128b < %s | FileCheck %s
+
+define {<8 x i8>, <8 x i8>} @load_factor2(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg2e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.sseg2.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret {<8 x i8>, <8 x i8>} %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>} @load_factor3(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg3e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg3.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @load_factor4(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg4e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg4.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @load_factor5(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg5e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg5.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @load_factor6(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg6e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg6.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @load_factor7(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg7e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg7.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
+
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @load_factor8(ptr %ptr, i64 %stride) {
+; CHECK-LABEL: load_factor8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vlsseg8e8.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.sseg8.load.mask.v8i8.i64.i64(ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8)
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 8963940..2c11bd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -360,13 +360,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -374,25 +374,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -400,8 +400,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -409,7 +409,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -417,12 +417,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -430,8 +430,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -443,7 +443,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -455,13 +455,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -469,29 +469,29 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -501,8 +501,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
@@ -511,7 +511,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
@@ -520,7 +520,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
@@ -529,7 +529,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -541,8 +541,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: li a1, 64
; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -550,7 +550,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: li a1, 64
; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -558,14 +558,14 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -577,13 +577,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -591,13 +591,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -605,14 +605,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -621,15 +621,15 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -639,8 +639,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -649,7 +649,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -661,15 +661,15 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -679,8 +679,8 @@ define void @aligned_bzero_64(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_66(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_66:
+define void @aligned_memset_zero_66(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_66:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 64(a0)
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -688,7 +688,7 @@ define void @aligned_bzero_66(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_66:
+; RV64-BOTH-LABEL: aligned_memset_zero_66:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 64(a0)
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -699,8 +699,8 @@ define void @aligned_bzero_66(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_96(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_96:
+define void @aligned_memset_zero_96(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_96:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -713,7 +713,7 @@ define void @aligned_bzero_96(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_96:
+; RV64-BOTH-LABEL: aligned_memset_zero_96:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -729,15 +729,15 @@ define void @aligned_bzero_96(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_128(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_128:
+define void @aligned_memset_zero_128(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_128:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_128:
+; RV64-BOTH-LABEL: aligned_memset_zero_128:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -747,8 +747,8 @@ define void @aligned_bzero_128(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_256(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_256:
+define void @aligned_memset_zero_256(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_256:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -757,7 +757,7 @@ define void @aligned_bzero_256(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_256:
+; RV64-BOTH-LABEL: aligned_memset_zero_256:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 3dc83d5..38d38f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -1636,3 +1636,49 @@ define <8 x half> @vector_interleave8_v8f16_v1f16(<1 x half> %a, <1 x half> %b,
%res = call <8 x half> @llvm.vector.interleave8.v8f16(<1 x half> %a, <1 x half> %b, <1 x half> %c, <1 x half> %d, <1 x half> %e, <1 x half> %f, <1 x half> %g, <1 x half> %h)
ret <8 x half> %res
}
+
+define <8 x i16> @interleave4_const_splat_v8i16(<2 x i16> %a) {
+; CHECK-LABEL: interleave4_const_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_const_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_const_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.i v8, 3
+; ZIP-NEXT: ret
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <8 x i16> %retval
+}
+
+define <8 x i16> @interleave4_same_nonconst_splat_v8i16(i16 %a) {
+; CHECK-LABEL: interleave4_same_nonconst_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.x v8, a0
+; ZIP-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> zeroinitializer
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat)
+ ret <8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 01cc5c5..ee38257 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -14947,3 +14947,147 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv2f64(<vscale x 2 x
%res = call <vscale x 16 x double> @llvm.vector.interleave8.nxv16f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6, <vscale x 2 x double> %v7)
ret <vscale x 16 x double> %res
}
+
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; V-LABEL: interleave2_diff_const_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.i v9, 3
+; V-NEXT: li a0, 4
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: vwaddu.vx v8, v9, a0
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 4
+; ZVBB-NEXT: li a0, 3
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a0
+; ZVBB-NEXT: vslideup.vx v8, v9, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.i v9, 4
+; ZIP-NEXT: vmv.v.i v10, 3
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v10, v9
+; ZIP-NEXT: ri.vzip2a.vv v8, v10, v9
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; V-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.x v9, a0
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vwaddu.vx v8, v9, a1
+; V-NEXT: vwmaccu.vx v8, a1, v10
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a1
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a1
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.x v9, a0
+; ZIP-NEXT: vmv.v.x v10, a1
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v9, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v9, v10
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_const_splat_nxv8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 1e2e779..2f2035b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -222,3 +222,14 @@ define <vscale x 1 x i64> @vleff_move_past_passthru(ptr %p, ptr %q, iXLen %avl)
%b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %vec, iXLen %avl)
ret <vscale x 1 x i64> %b
}
+
+define <vscale x 1 x i64> @vmerge(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl) {
+; CHECK-LABEL: vmerge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %a, iXLen %avl)
+ ret <vscale x 1 x i64> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
index 6e106e5..9c3e96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
@@ -152,3 +152,19 @@ body: |
%y:gpr = ADDI $x0, 1
%z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
...
+---
+name: vmerge_vvm
+body: |
+ bb.0:
+ liveins: $v8, $v0
+ ; CHECK-LABEL: name: vmerge_vvm
+ ; CHECK: liveins: $v8, $v0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %passthru:vrnov0 = COPY $v8
+ ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+ ; CHECK-NEXT: %x:vrnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %passthru:vr = COPY $v8
+ %mask:vmv0 = COPY $v0
+ %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
+...
diff --git a/llvm/test/CodeGen/RISCV/xmips-cbop.ll b/llvm/test/CodeGen/RISCV/xmips-cbop.ll
index cbbd1de..0d5defc 100644
--- a/llvm/test/CodeGen/RISCV/xmips-cbop.ll
+++ b/llvm/test/CodeGen/RISCV/xmips-cbop.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+xmipscbop -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32XMIPSPREFETCH
-; RUN: llc -mtriple=riscv64 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+xmipscbop -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64XMIPSPREFETCH
define void @prefetch_data_read(ptr noundef %ptr) nounwind {
@@ -49,3 +49,54 @@ define void @prefetch_inst_read(ptr noundef %ptr) nounwind {
tail call void @llvm.prefetch.p0(ptr nonnull %arrayidx, i32 0, i32 0, i32 0)
ret void
}
+
+define void @prefetch_frameindex_test_neg() nounwind {
+; RV32XMIPSPREFETCH-LABEL: prefetch_frameindex_test_neg:
+; RV32XMIPSPREFETCH: # %bb.0:
+; RV32XMIPSPREFETCH-NEXT: lui a0, 1
+; RV32XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV32XMIPSPREFETCH-NEXT: sub sp, sp, a0
+; RV32XMIPSPREFETCH-NEXT: addi a0, sp, 524
+; RV32XMIPSPREFETCH-NEXT: mips.pref 8, 0(a0)
+; RV32XMIPSPREFETCH-NEXT: lui a0, 1
+; RV32XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV32XMIPSPREFETCH-NEXT: add sp, sp, a0
+; RV32XMIPSPREFETCH-NEXT: ret
+;
+; RV64XMIPSPREFETCH-LABEL: prefetch_frameindex_test_neg:
+; RV64XMIPSPREFETCH: # %bb.0:
+; RV64XMIPSPREFETCH-NEXT: lui a0, 1
+; RV64XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV64XMIPSPREFETCH-NEXT: sub sp, sp, a0
+; RV64XMIPSPREFETCH-NEXT: addi a0, sp, 524
+; RV64XMIPSPREFETCH-NEXT: mips.pref 8, 0(a0)
+; RV64XMIPSPREFETCH-NEXT: lui a0, 1
+; RV64XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV64XMIPSPREFETCH-NEXT: add sp, sp, a0
+; RV64XMIPSPREFETCH-NEXT: ret
+ %data = alloca [1024 x i32], align 4
+ %ptr = getelementptr [1024 x i32], ptr %data, i32 0, i32 127
+ call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
+ ret void
+}
+
+define void @prefetch_frameindex_test() nounwind {
+; RV32XMIPSPREFETCH-LABEL: prefetch_frameindex_test:
+; RV32XMIPSPREFETCH: # %bb.0:
+; RV32XMIPSPREFETCH-NEXT: addi sp, sp, -512
+; RV32XMIPSPREFETCH-NEXT: mips.pref 8, 32(sp)
+; RV32XMIPSPREFETCH-NEXT: addi sp, sp, 512
+; RV32XMIPSPREFETCH-NEXT: ret
+;
+; RV64XMIPSPREFETCH-LABEL: prefetch_frameindex_test:
+; RV64XMIPSPREFETCH: # %bb.0:
+; RV64XMIPSPREFETCH-NEXT: addi sp, sp, -512
+; RV64XMIPSPREFETCH-NEXT: mips.pref 8, 32(sp)
+; RV64XMIPSPREFETCH-NEXT: addi sp, sp, 512
+; RV64XMIPSPREFETCH-NEXT: ret
+ %data = alloca [128 x i32], align 4
+ %base = bitcast ptr %data to ptr
+ %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 8
+ call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/zilsd.ll b/llvm/test/CodeGen/RISCV/zilsd.ll
index 09b065a..048ce96 100644
--- a/llvm/test/CodeGen/RISCV/zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/zilsd.ll
@@ -117,3 +117,22 @@ entyr:
store i64 0, ptr @g
ret void
}
+
+define void @large_offset(ptr nocapture %p, i64 %d) nounwind {
+; CHECK-LABEL: large_offset:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 4
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: ld a2, -384(a0)
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: seqz a1, a2
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: sd a2, -384(a0)
+; CHECK-NEXT: ret
+entry:
+ %add.ptr = getelementptr inbounds i64, ptr %p, i64 2000
+ %a = load i64, ptr %add.ptr, align 8
+ %b = add i64 %a, 1
+ store i64 %b, ptr %add.ptr, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
new file mode 100644
index 0000000..b2333e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
@@ -0,0 +1,42 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+@.str = private unnamed_addr constant [4 x i8] c"In3\00", align 1
+@.str.2 = private unnamed_addr constant [5 x i8] c"Out4\00", align 1
+@.str.3 = private unnamed_addr constant [5 x i8] c"Out3\00", align 1
+
+
+; CHECK-DAG: %[[#INT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#INT4:]] = OpTypeVector %[[#INT32]] 4
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FLOAT4:]] = OpTypeVector %[[#FLOAT]] 4
+; CHECK-DAG: %[[#INT3:]] = OpTypeVector %[[#INT32]] 3
+; CHECK-DAG: %[[#UNDEF_INT4:]] = OpUndef %[[#INT4]]
+
+define void @case1() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4i32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) %2, i32 0)
+ store <4 x i32> %4, ptr addrspace(11) %5, align 16
+ ret void
+}
+
+define void @case2() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ ; CHECK: %[[#VEC_TRUNCATE:]] = OpVectorShuffle %[[#INT3]] %[[#VEC_SHUFFLE]] %[[#UNDEF_INT4]] 0 1 2
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.3)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = shufflevector <4 x i32> %4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+ %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3i32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) %2, i32 0)
+ store <3 x i32> %5, ptr addrspace(11) %6, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll
new file mode 100644
index 0000000..ec8330c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll
@@ -0,0 +1,408 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool
+; CHECK-DAG: %[[#FP32Ty:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FP64Ty:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#FP16Ty:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#I32Ty:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#I64Ty:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#I16Ty:]] = OpTypeInt 16 0
+
+; CHECK-DAG: %[[#V4I32Ty:]] = OpTypeVector %[[#I32Ty]] 4
+; CHECK-DAG: %[[#V4FP32Ty:]] = OpTypeVector %[[#FP32Ty]] 4
+; CHECK-DAG: %[[#V4BoolTy:]] = OpTypeVector %[[#BoolTy]] 4
+
+; CHECK-DAG: %[[#MaxExpMinus1:]] = OpConstant %[[#I32Ty]] 2130706432
+; CHECK-DAG: %[[#ExpLSB:]] = OpConstant %[[#I32Ty]] 8388608
+; CHECK-DAG: %[[#True:]] = OpConstantTrue %[[#BoolTy]]
+; CHECK-DAG: %[[#False:]] = OpConstantFalse %[[#BoolTy]]
+; CHECK-DAG: %[[#ValueMask:]] = OpConstant %[[#I32Ty]] 2147483647
+; CHECK-DAG: %[[#InfWithQnanBit:]] = OpConstant %[[#I32Ty]] 2143289344
+; CHECK-DAG: %[[#Inf:]] = OpConstant %[[#I32Ty]] 2139095040
+; CHECK-DAG: %[[#NegInf:]] = OpConstant %[[#I32Ty]] 4286578688
+; CHECK-DAG: %[[#One:]] = OpConstant %[[#I32Ty]] 1
+; CHECK-DAG: %[[#Zero:]] = OpConstantNull %[[#I32Ty]]
+; CHECK-DAG: %[[#AllOneMantissa:]] = OpConstant %[[#I32Ty]] 8388607
+; CHECK-DAG: %[[#SignBit:]] = OpConstant %[[#I32Ty]] 2147483648
+
+; CHECK-DAG: %[[#ValueMaskFP64:]] = OpConstant %[[#I64Ty]] 9223372036854775807
+; CHECK-DAG: %[[#InfFP64:]] = OpConstant %[[#I64Ty]] 9218868437227405312
+; CHECK-DAG: %[[#NegInfFP64:]] = OpConstant %[[#I64Ty]] 18442240474082181120
+
+; CHECK-DAG: %[[#FalseV4:]] = OpConstantComposite %[[#V4BoolTy]] %[[#False]] %[[#False]] %[[#False]] %[[#False]]
+; CHECK-DAG: %[[#ValueMaskV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#ValueMask]] %[[#ValueMask]] %[[#ValueMask]] %[[#ValueMask]]
+; CHECK-DAG: %[[#InfV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#Inf]] %[[#Inf]] %[[#Inf]] %[[#Inf]]
+; CHECK-DAG: %[[#InfWithQnanBitV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]]
+; CHECK-DAG: %[[#ValueMaskFP16:]] = OpConstant %[[#I16Ty]] 32767
+; CHECK-DAG: %[[#InfFP16:]] = OpConstant %[[#I16Ty]] 31744
+; CHECK-DAG: %[[#NegInfFP16:]] = OpConstant %[[#I16Ty]] 64512
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: OpReturnValue %[[#False]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_0_none(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 0)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfWithQnanBit]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+
+define i1 @isfpclass_1_issnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 1)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#V4BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#V4FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#V4I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#V4I32Ty]] %[[#T0]] %[[#ValueMaskV4]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#V4BoolTy]] %[[#T1]] %[[#InfV4]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#V4BoolTy]] %[[#T1]] %[[#InfWithQnanBitV4]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#V4BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#V4BoolTy]] %[[#FalseV4]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+
+define <4 x i1> @isfpclass_1_issnan_v4f32(<4 x float> %a) {
+ %v = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %a, i32 1)
+ ret <4 x i1> %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThanEqual %[[#BoolTy]] %[[#T1]] %[[#InfWithQnanBit]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isqnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 2)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 3)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_ispinf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 512)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isninf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 4)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpIEqual %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isinf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 516)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T5:]] = OpLogicalNotEqual %[[#BoolTy]] %[[#T2]] %[[#True]]
+; CHECK: %[[#T6:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T5]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isposnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 256)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T5:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T2]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnegnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 8)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T2]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 264)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: %[[#T4:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T5:]] = OpULessThan %[[#BoolTy]] %[[#T4]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#T3]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isnan_or_normal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 267)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpISub %[[#I32Ty]] %[[#T0]] %[[#One]]
+; CHECK: %[[#T2:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#AllOneMantissa]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispsubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 128)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#One]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#AllOneMantissa]]
+; CHECK: %[[#T5:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T2]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnsubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 16)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#One]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T2]] %[[#AllOneMantissa]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_issubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 144)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Zero]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispzero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 64)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#SignBit]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnzero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 32)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpIEqual %[[#BoolTy]] %[[#T1]] %[[#Zero]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_iszero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 96)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpULessThan %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 448)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 56)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 504)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpULessThan %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispositive(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 960)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInf]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnegative(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 60)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: OpReturnValue %[[#True]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_all(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 1023)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP64Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I64Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I64Ty]] %[[#T0]] %[[#ValueMaskFP64]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfFP64]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInfFP64]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_f64_isnegative(double %a) {
+ %v = call i1 @llvm.is.fpclass.f64(double %a, i32 60)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP16Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I16Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I16Ty]] %[[#T0]] %[[#ValueMaskFP16]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfFP16]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInfFP16]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_f16_isnegative(half %a) {
+ %v = call i1 @llvm.is.fpclass.f16(half %a, i32 60)
+ ret i1 %v
+}
+
+declare i1 @llvm.is.fpclass.f32(float, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32)
+declare i1 @llvm.is.fpclass.f64(double, i32)
+declare i1 @llvm.is.fpclass.f16(half, i32)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
index 085f8b3..9d07b63 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -33,7 +33,7 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
ret void
}
@@ -55,7 +55,7 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
ret void
}
diff --git a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
index a1ff1e0..66337b1 100644
--- a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
+++ b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -print-after-all | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
new file mode 100644
index 0000000..26dc60e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
@@ -0,0 +1,46 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main() {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+; CHECK-DAG: %[[#ulong_3:]] = OpConstant %[[#ulong]] 3
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#tmp:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#ptr:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#tmp]] %[[#ulong_1]] %[[#ulong_3]]
+; This rewritten GEP combined all constant indices into a single value.
+; We should make sure the correct indices are retrieved.
+ %arrayidx.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 64
+
+; CHECK: OpLoad %[[#v4f]] %[[#ptr]]
+ %4 = load <4 x float>, ptr addrspace(11) %arrayidx.i, align 1
+
+ ret <4 x float> %4
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
new file mode 100644
index 0000000..a6efb38
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main(i32 %index) {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+; CHECK-DAG: %[[#uint_16:]] = OpConstant %[[#uint]] 16
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_arr_v4f:]] = OpTypePointer StorageBuffer %[[#arr_v4f]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#a:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#b:]] = OpInBoundsAccessChain %[[#sb_arr_v4f]] %[[#a]] %[[#ulong_1]]
+ %4 = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+
+; CHECK: %[[#offset:]] = OpIMul %[[#]] %[[#]] %[[#uint_16]]
+; Offset is computed in bytes. Make sure we reconvert it back to an index.
+ %offset = mul i32 %index, 16
+
+; CHECK: %[[#index:]] = OpUDiv %[[#]] %[[#offset]] %[[#uint_16]]
+; CHECK: %[[#c:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#b]] %[[#index]]
+ %5 = getelementptr inbounds nuw i8, ptr addrspace(11) %4, i32 %offset
+
+; CHECK: OpLoad %[[#v4f]] %[[#c]]
+ %6 = load <4 x float>, ptr addrspace(11) %5, align 1
+
+ ret <4 x float> %6
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
new file mode 100644
index 0000000..8e6b5a6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+; struct S1 {
+; int4 i;
+; float4 f;
+; };
+; struct S2 {
+; float4 f;
+; int4 i;
+; };
+;
+; StructuredBuffer<S1> In : register(t1);
+; RWStructuredBuffer<S2> Out : register(u0);
+;
+; [numthreads(1,1,1)]
+; void main(uint GI : SV_GroupIndex) {
+; Out[GI].f = In[GI].f;
+; Out[GI].i = In[GI].i;
+; }
+
+%struct.S1 = type { <4 x i32>, <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"Out\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+; CHECK-LABEL: main
+; CHECK: %43 = OpFunction %2 None %3 ; -- Begin function main
+; CHECK-NEXT: %1 = OpLabel
+; CHECK-NEXT: %44 = OpVariable %28 Function %38
+; CHECK-NEXT: %45 = OpVariable %27 Function %39
+; CHECK-NEXT: %46 = OpCopyObject %19 %40
+; CHECK-NEXT: %47 = OpCopyObject %16 %41
+; CHECK-NEXT: %48 = OpLoad %4 %42
+; CHECK-NEXT: %49 = OpAccessChain %13 %46 %29 %48
+; CHECK-NEXT: %50 = OpInBoundsAccessChain %9 %49 %31
+; CHECK-NEXT: %51 = OpLoad %8 %50 Aligned 1
+; CHECK-NEXT: %52 = OpAccessChain %11 %47 %29 %48
+; CHECK-NEXT: %53 = OpInBoundsAccessChain %9 %52 %29
+; CHECK-NEXT: OpStore %53 %51 Aligned 1
+; CHECK-NEXT: %54 = OpAccessChain %6 %49 %29
+; CHECK-NEXT: %55 = OpLoad %5 %54 Aligned 1
+; CHECK-NEXT: %56 = OpInBoundsAccessChain %6 %52 %31
+; CHECK-NEXT: OpStore %56 %55 Aligned 1
+; CHECK-NEXT: OpReturn
+; CHECK-NEXT: OpFunctionEnd
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %1 = tail call target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %2 = tail call i32 @llvm.spv.flattened.thread.id.in.group()
+ %3 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 %2)
+ %f.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+ %4 = load <4 x float>, ptr addrspace(11) %f.i, align 1
+ %5 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) %1, i32 %2)
+ store <4 x float> %4, ptr addrspace(11) %5, align 1
+ %6 = load <4 x i32>, ptr addrspace(11) %3, align 1
+ %i6.i = getelementptr inbounds nuw i8, ptr addrspace(11) %5, i64 16
+ store <4 x i32> %6, ptr addrspace(11) %i6.i, align 1
+ ret void
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32, i32, i32, i32, i1, ptr)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1), i32)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
new file mode 100644
index 0000000..8b5efe7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
@@ -0,0 +1,104 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Float16
+; CHECK-DAG: OpCapability ImageBasic
+; CHECK-DAG: OpCapability ImageReadWrite
+; CHECK-DAG: OpCapability Pipes
+; CHECK-DAG: OpCapability DeviceEnqueue
+
+; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
+; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#HALF:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#PIPE_RD:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#PIPE_WR:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#IMG1D_RD:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown ReadOnly
+; CHECK-DAG: %[[#IMG2D_RD:]] = OpTypeImage %[[#INT]] 2D 0 0 0 0
+; CHECK-DAG: %[[#IMG3D_RD:]] = OpTypeImage %[[#INT]] 3D 0 0 0 0
+; CHECK-DAG: %[[#IMG2DA_RD:]] = OpTypeImage %[[#HALF]] 2D 0 1 0 0
+; CHECK-DAG: %[[#IMG2DD_RD:]] = OpTypeImage %[[#FLOAT]] Buffer 0 0 0
+; CHECK-DAG: %[[#IMG1D_WR:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown WriteOnly
+; CHECK-DAG: %[[#IMG2D_RW:]] = OpTypeImage %[[#VOID]] 2D 0 0 0 0 Unknown ReadWrite
+; CHECK-DAG: %[[#IMG1DB_RD:]] = OpTypeImage %[[#FLOAT]] 2D 1 0 0 0
+
+; CHECK-DAG: %[[#DEVEVENT:]] = OpTypeDeviceEvent
+; CHECK-DAG: %[[#EVENT:]] = OpTypeEvent
+; CHECK-DAG: %[[#QUEUE:]] = OpTypeQueue
+; CHECK-DAG: %[[#RESID:]] = OpTypeReserveId
+; CHECK-DAG: %[[#SAMP:]] = OpTypeSampler
+; CHECK-DAG: %[[#SAMPIMG:]] = OpTypeSampledImage %[[#IMG1DB_RD]]
+
+; CHECK-DAG: %[[#]] = OpFunction %[[#VOID]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG3D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DA_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DD_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RW]]
+
+define spir_kernel void @foo(
+ target("spirv.Pipe", 0) %a,
+ target("spirv.Pipe", 1) %b,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %c1,
+ target("spirv.Image", i32, 1, 0, 0, 0, 0, 0, 0) %d1,
+ target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) %e1,
+ target("spirv.Image", half, 1, 0, 1, 0, 0, 0, 0) %f1,
+ target("spirv.Image", float, 5, 0, 0, 0, 0, 0, 0) %g1,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) %c2,
+ target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 2) %d3) #0 !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+entry:
+ ret void
+}
+
+; CHECK-DAG: %[[#]] = OpFunction
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#DEVEVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#EVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#QUEUE]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#RESID]]
+
+; CHECK-DAG: %[[#IMARG:]] = OpFunctionParameter %[[#IMG1DB_RD]]
+; CHECK-DAG: %[[#SAMARG:]] = OpFunctionParameter %[[#SAMP]]
+; CHECK-DAG: %[[#SAMPIMVAR:]] = OpSampledImage %[[#SAMPIMG]] %[[#IMARG]] %[[#SAMARG]]
+; CHECK-DAG: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SAMPIMVAR]]
+
+define spir_func void @bar(
+ target("spirv.DeviceEvent") %a,
+ target("spirv.Event") %b,
+ target("spirv.Queue") %c,
+ target("spirv.ReserveId") %d) {
+ ret void
+}
+
+define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce,
+ target("spirv.Sampler") %s.coerce) {
+ %1 = tail call spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce, target("spirv.Sampler") %s.coerce) #1
+ %2 = tail call spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) %1, <4 x i32> zeroinitializer, i32 2, float 1.000000e+00) #1
+ ret void
+}
+
+declare spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler"))
+
+declare spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0), <4 x i32>, i32, float)
+
+attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!opencl.enable.FP_CONTRACT = !{}
+!opencl.spir.version = !{!6}
+!opencl.ocl.version = !{!7}
+!opencl.used.extensions = !{!8}
+!opencl.used.optional.core.features = !{!9}
+!opencl.compiler.options = !{!8}
+
+!1 = !{i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1}
+!2 = !{!"read_only", !"write_only", !"read_only", !"read_only", !"read_only", !"read_only", !"read_only", !"write_only", !"read_write"}
+!3 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!4 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!5 = !{!"pipe", !"pipe", !"", !"", !"", !"", !"", !"", !""}
+!6 = !{i32 1, i32 2}
+!7 = !{i32 2, i32 0}
+!8 = !{!"cl_khr_fp16"}
+!9 = !{!"cl_images"}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
new file mode 100644
index 0000000..63b2604
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
@@ -0,0 +1,19 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Fun:]] "_Z3booi"
+; CHECK-DAG: OpDecorate %[[#Param:]] FuncParamAttr Sret
+; CHECK-DAG: %[[#PtrTy:]] = OpTypePointer Function %[[#StructTy:]]
+; CHECK-DAG: %[[#StructTy]] = OpTypeStruct
+; CHECK: %[[#Fun]] = OpFunction %[[#]]
+; CHECK: %[[#Param]] = OpFunctionParameter %[[#PtrTy]]
+
+%struct.Example = type { }
+
+define spir_func i32 @foo() {
+ %1 = alloca %struct.Example, align 8
+ call void @_Z3booi(ptr sret(%struct.Example) align 8 %1, i32 noundef 42)
+ ret i32 0
+}
+
+declare void @_Z3booi(ptr sret(%struct.Example) align 8, i32 noundef)
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
index 73c7a8d..5835616 100644
--- a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
@@ -3,6 +3,23 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; Test a v16i8 -> v8i16 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f1_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhb %v1, %v26
+; CHECK-NEXT: vmlhw %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext1 = zext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext2 = zext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %zext1, %zext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 unsigned widening multiplication.
define <8 x i16> @f1(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f1:
@@ -31,6 +48,23 @@ define <8 x i16> @f2(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v16i8 -> v8i16 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f3_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphb %v0, %v26
+; CHECK-NEXT: vuphb %v1, %v24
+; CHECK-NEXT: vmlhw %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext1 = sext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext2 = sext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %sext1, %sext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 signed widening multiplication.
define <8 x i16> @f3(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f3:
@@ -59,6 +93,23 @@ define <8 x i16> @f4(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v8i16 -> v4i32 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f5_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhh %v0, %v24
+; CHECK-NEXT: vuplhh %v1, %v26
+; CHECK-NEXT: vmlf %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext1 = zext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext2 = zext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %zext1, %zext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 unsigned widening multiplication.
define <4 x i32> @f5(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f5:
@@ -87,6 +138,23 @@ define <4 x i32> @f6(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v8i16 -> v4i32 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f7_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphh %v0, %v26
+; CHECK-NEXT: vuphh %v1, %v24
+; CHECK-NEXT: vmlf %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext1 = sext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext2 = sext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %sext1, %sext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 signed widening multiplication.
define <4 x i32> @f7(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f7:
@@ -115,6 +183,29 @@ define <4 x i32> @f8(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v4i32 -> v2i64 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f9_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhf %v0, %v24
+; CHECK-NEXT: vuplhf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext1 = zext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext2 = zext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %zext1, %zext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 unsigned widening multiplication.
define <2 x i64> @f9(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f9:
@@ -143,6 +234,29 @@ define <2 x i64> @f10(<4 x i32> %val1, <4 x i32> %val2) {
ret <2 x i64> %ret
}
+; Test a v4i32 -> v2i64 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f11_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphf %v0, %v24
+; CHECK-NEXT: vuphf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext1 = sext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext2 = sext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %sext1, %sext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 signed widening multiplication.
define <2 x i64> @f11(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f11:
diff --git a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
index 60cfc27..4a4973b 100644
--- a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
+++ b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
@@ -68,9 +68,9 @@ target triple = "wasm32-unknown-unknown"
; bleeding-edge: +atomics, +bulk-memory, +bulk-memory-opt,
; +call-indirect-overlong, +exception-handling,
-; +extended-const, +fp16, +multimemory, +multivalue,
+; +extended-const, +fp16, +gc, +multimemory, +multivalue,
; +mutable-globals, +nontrapping-fptoint, +relaxed-simd,
-; +reference-types, +simd128, +sign-ext, +tail-call, +gc
+; +reference-types, +simd128, +sign-ext, +tail-call
; BLEEDING-EDGE-LABEL: .section .custom_section.target_features,"",@
; BLEEDING-EDGE-NEXT: .int8 17
; BLEEDING-EDGE-NEXT: .int8 43
diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll
index b111ae5..e52ce6c 100644
--- a/llvm/test/CodeGen/X86/apx/cf.ll
+++ b/llvm/test/CodeGen/X86/apx/cf.ll
@@ -194,3 +194,38 @@ entry:
call void @llvm.masked.store.v1i64.p0(<1 x i64> %3, ptr %p, i32 4, <1 x i1> %0)
ret void
}
+
+define void @sink_gep(ptr %p, i1 %cond) {
+; CHECK-LABEL: sink_gep:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovnel %eax, 112(%rdi)
+; CHECK-NEXT: cfcmovnel 112(%rdi), %eax
+; CHECK-NEXT: movl %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = getelementptr i8, ptr %p, i64 112
+ br label %next
+
+next:
+ %1 = bitcast i1 %cond to <1 x i1>
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %0, i32 1, <1 x i1> %1)
+ %2 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %0, i32 1, <1 x i1> %1, <1 x i32> zeroinitializer)
+ store <1 x i32> %2, ptr %p, align 4
+ ret void
+}
+
+define void @xor_cond(ptr %p, i1 %cond) {
+; CHECK-LABEL: xor_cond:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovel %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = xor i1 %cond, true
+ %1 = insertelement <1 x i1> zeroinitializer, i1 %0, i64 0
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %p, i32 1, <1 x i1> %1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
new file mode 100644
index 0000000..1136287
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -0,0 +1,43 @@
+;; Test if temporary labels are generated for each indirect callsite with a callee_type metadata.
+;; Test if the .callgraph section contains the MD5 hash of callee type ids generated from
+;; generalized type id strings.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -o - < %s | FileCheck %s
+
+; CHECK: ball:
+; CHECK-NEXT: [[LABEL_FUNC:\.Lfunc_begin[0-9]+]]:
+define ptr @ball() {
+entry:
+ %fp_foo_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP0:\.L.*]]:
+ call void (...) %fp_foo_val(), !callee_type !0
+ %fp_bar_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP1:\.L.*]]:
+ %call_fp_bar = call i32 %fp_bar_val(i8 0), !callee_type !2
+ %fp_baz_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP2:\.L.*]]:
+ %call_fp_baz = call ptr %fp_baz_val(ptr null), !callee_type !4
+ ret ptr %call_fp_baz
+}
+
+; CHECK: .section .callgraph,"o",@progbits,.text
+
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad [[LABEL_FUNC]]
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad 3
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvE.generalized"}
+;; Test for MD5 hash of _ZTSFvE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 4524972987496481828
+; CHECK-NEXT: .quad [[LABEL_TMP0]]
+!2 = !{!3}
+!3 = !{i64 0, !"_ZTSFicE.generalized"}
+;; Test for MD5 hash of _ZTSFicE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 3498816979441845844
+; CHECK-NEXT: .quad [[LABEL_TMP1]]
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
+;; Test for MD5 hash of _ZTSFPvS_E.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 8646233951371320954
+; CHECK-NEXT: .quad [[LABEL_TMP2]]
diff --git a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
new file mode 100644
index 0000000..fa14a98
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
@@ -0,0 +1,34 @@
+;; Tests that we store the type identifiers in .callgraph section of the object file for tailcalls.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+define i32 @main(i32 %argc) !type !3 {
+entry:
+ %andop = and i32 %argc, 1
+ %cmp = icmp eq i32 %andop, 0
+ %foo.bar = select i1 %cmp, ptr @foo, ptr @bar
+ %call.i = tail call i32 %foo.bar(i8 signext 97), !callee_type !1
+ ret i32 %call.i
+}
+
+declare !type !2 i32 @foo(i8 signext)
+
+declare !type !2 i32 @bar(i8 signext)
+
+;; Check that the numeric type id (md5 hash) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{i64 0, !"_ZTSFiiE.generalized"}
diff --git a/llvm/test/CodeGen/X86/call-graph-section.ll b/llvm/test/CodeGen/X86/call-graph-section.ll
new file mode 100644
index 0000000..4a9840e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section.ll
@@ -0,0 +1,38 @@
+;; Tests that we store the type identifiers in .callgraph section of the object file.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+declare !type !0 void @foo()
+
+declare !type !1 i32 @bar(i8)
+
+declare !type !2 ptr @baz(ptr)
+
+define void @main() {
+entry:
+ %a = alloca i8, align 1
+ %fp_foo_val = load ptr, ptr null, align 8
+ call void (...) %fp_foo_val(), !callee_type !1
+ %fp_bar_val = load ptr, ptr null, align 8
+ %param = trunc i64 0 to i8
+ %call_fp_bar = call i32 %fp_bar_val(i8 signext %param), !callee_type !3
+ %fp_baz_val = load ptr, ptr null, align 8
+ %call_fp_baz = call ptr %fp_baz_val(ptr %a), !callee_type !4
+ ret void
+}
+
+;; Check that the numeric type id (md5 hash) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+; CHECK-DAG: 2444f731 f5eecb3e
+!0 = !{i64 0, !"_ZTSFvE.generalized"}
+!1 = !{!0}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{!2}
+; CHECK-DAG: 7ade6814 f897fd77
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
diff --git a/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..7881ea7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple x86_64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seemlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seemlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seemlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..8f6b7a6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..e97a6ac
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll
new file mode 100644
index 0000000..ea7454f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=x86_64-grtev4-linux-gnu < %s | FileCheck %s
+
+%struct.wibble = type { %struct.wombat }
+%struct.wombat = type { %struct.ham, [3 x i8] }
+%struct.ham = type { %struct.zot }
+%struct.zot = type { %struct.blam }
+%struct.blam = type { %struct.ham.0 }
+%struct.ham.0 = type { %struct.bar }
+%struct.bar = type { %struct.bar.1 }
+%struct.bar.1 = type { %struct.baz, i8 }
+%struct.baz = type { %struct.snork }
+%struct.snork = type <{ %struct.spam, i8, [3 x i8] }>
+%struct.spam = type { %struct.snork.2, %struct.snork.2 }
+%struct.snork.2 = type { i32 }
+%struct.snork.3 = type { %struct.baz, i8, [3 x i8] }
+
+define void @foo(ptr %arg, ptr %arg1, i40 %arg2, ptr %arg3, i32 %arg4) #0 {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: movl %r8d, %r14d
+; CHECK-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movq %rsi, %r13
+; CHECK-NEXT: movq %rdi, %r15
+; CHECK-NEXT: incl %r14d
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: # implicit-def: $r12
+; CHECK-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_1: # %bb17
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movq %r15, %r13
+; CHECK-NEXT: xorl %r15d, %r15d
+; CHECK-NEXT: testq %rbx, %rbx
+; CHECK-NEXT: sete %r15b
+; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: callq _Znwm@PLT
+; CHECK-NEXT: shll $4, %r15d
+; CHECK-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; CHECK-NEXT: movq %r12, %rcx
+; CHECK-NEXT: shrq $32, %rcx
+; CHECK-NEXT: movb %cl, 12(%rax)
+; CHECK-NEXT: movl %r12d, 8(%rax)
+; CHECK-NEXT: movq %r15, %rbx
+; CHECK-NEXT: movq %r13, %r15
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; CHECK-NEXT: decl %r14d
+; CHECK-NEXT: je .LBB0_8
+; CHECK-NEXT: .LBB0_3: # %bb7
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: callq widget@PLT
+; CHECK-NEXT: cmpb $-5, (%r13)
+; CHECK-NEXT: jae .LBB0_5
+; CHECK-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movl %r12d, %r12d
+; CHECK-NEXT: cmpq %r15, %rbx
+; CHECK-NEXT: jbe .LBB0_1
+; CHECK-NEXT: jmp .LBB0_7
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_5: # %bb12
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movq 0, %rax
+; CHECK-NEXT: movq 8, %rax
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; CHECK-NEXT: cmpq %r15, %rbx
+; CHECK-NEXT: jbe .LBB0_1
+; CHECK-NEXT: .LBB0_7: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: decl %r14d
+; CHECK-NEXT: jne .LBB0_3
+; CHECK-NEXT: .LBB0_8: # %bb21
+; CHECK-NEXT: cmpb $0, 12(%rax)
+; CHECK-NEXT: jne .LBB0_10
+; CHECK-NEXT: # %bb.9: # %bb26
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_10: # %bb25
+; CHECK-NEXT: .cfi_def_cfa %rbp, 16
+; CHECK-NEXT: movq %r15, %rdi
+; CHECK-NEXT: callq pluto@PLT
+bb:
+ br label %bb7
+
+bb5: ; preds = %bb17, %bb14
+ %phi = phi ptr [ %call19, %bb17 ], [ null, %bb14 ]
+ %phi6 = phi ptr [ %getelementptr, %bb17 ], [ null, %bb14 ]
+ %add = add i32 %phi9, 1
+ %icmp = icmp eq i32 %phi9, %arg4
+ br i1 %icmp, label %bb21, label %bb7
+
+bb7: ; preds = %bb5, %bb
+ %phi8 = phi ptr [ null, %bb ], [ %phi6, %bb5 ]
+ %phi9 = phi i32 [ 0, %bb ], [ %add, %bb5 ]
+ %phi10 = phi i40 [ poison, %bb ], [ %phi15, %bb5 ]
+ %call = call ptr @widget()
+ %load = load i8, ptr %arg1, align 8
+ %icmp11 = icmp ult i8 %load, -5
+ %and = and i40 %phi10, 4294967295
+ br i1 %icmp11, label %bb14, label %bb12
+
+bb12: ; preds = %bb7
+ %load13 = load volatile { i64, i64 }, ptr null, align 4294967296
+ br label %bb14
+
+bb14: ; preds = %bb12, %bb7
+ %phi15 = phi i40 [ %and, %bb7 ], [ %arg2, %bb12 ]
+ %icmp16 = icmp ugt ptr %phi8, %arg
+ br i1 %icmp16, label %bb5, label %bb17
+
+bb17: ; preds = %bb14
+ %icmp18 = icmp eq ptr %phi8, null
+ %zext = zext i1 %icmp18 to i64
+ %call19 = call ptr @_Znwm(i64 0)
+ %getelementptr = getelementptr %struct.wibble, ptr %arg3, i64 %zext
+ %getelementptr20 = getelementptr i8, ptr %call19, i64 8
+ store i40 %phi15, ptr %getelementptr20, align 4
+ br label %bb5
+
+bb21: ; preds = %bb5
+ %getelementptr22 = getelementptr %struct.snork.3, ptr %phi, i64 0, i32 1
+ %load23 = load i8, ptr %getelementptr22, align 4
+ %icmp24 = icmp eq i8 %load23, 0
+ br i1 %icmp24, label %bb26, label %bb25
+
+bb25: ; preds = %bb21
+ call void @pluto(ptr %arg)
+ unreachable
+
+bb26: ; preds = %bb21
+ ret void
+}
+
+define void @eggs(ptr %arg, ptr %arg1) {
+; CHECK-LABEL: eggs:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: movq %rax, %rsi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: xorl %r8d, %r8d
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+bb:
+ call void @foo(ptr %arg1, ptr %arg, i40 0, ptr null, i32 0)
+ ret void
+}
+
+declare ptr @widget()
+
+declare void @pluto(ptr)
+
+declare ptr @_Znwm(i64)
+
+attributes #0 = { noinline "frame-pointer"="all" }
diff --git a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
index 8241a17..0bc208d 100644
--- a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
+++ b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
-# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=register-coalescer -o - %s | FileCheck %s
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines
---
name: rematerialize_subreg_to_reg_added_impdef_1
tracksRegLiveness: true
@@ -9,7 +9,7 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x2aaaaaab), %bb.2(0x55555555)
; CHECK-NEXT: liveins: $edi
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
; CHECK-NEXT: JCC_1 %bb.2, 5, implicit killed undef $eflags
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
@@ -28,7 +28,7 @@ body: |
; CHECK-NEXT: JCC_1 %bb.5, 5, implicit killed undef $eflags
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4:
- ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al
+ ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al, implicit-def $al
; CHECK-NEXT: RET 0, killed undef $al
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.5:
diff --git a/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir b/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir
new file mode 100644
index 0000000..2e6395f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -enable-subreg-liveness -verify-coalescing -o - %s | FileCheck %s
+
+---
+name: requires_new_subrange_coalesce_subreg_to_reg
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: requires_new_subrange_coalesce_subreg_to_reg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = COPY $eax
+ ; CHECK-NEXT: %b:gr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit
+ ; CHECK-NEXT: JCC_1 %bb.2, 4, implicit undef $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: %c.sub_32bit:gr64 = COPY %a
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: %c.sub_32bit:gr64 = SUBREG_TO_REG %a, %b, %subreg.sub_32bit
+ ; CHECK-NEXT: RET 0, implicit %c
+ bb.0:
+ liveins: $eax
+ %init_eax:gr32 = COPY $eax
+ %a:gr64 = SUBREG_TO_REG 0, %init_eax, %subreg.sub_32bit
+ %b:gr32 = IMPLICIT_DEF
+ %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit
+ JCC_1 %bb.2, 4, implicit undef $eflags
+
+ bb.1:
+ %imm0:gr32 = MOV32r0 implicit-def dead $eflags
+ %a = SUBREG_TO_REG 0, %imm0, %subreg.sub_32bit
+ %c.sub_32bit = COPY %a
+
+ bb.2:
+ %c.sub_32bit = SUBREG_TO_REG %a, %b, %subreg.sub_32bit
+ RET 0, implicit %c
+
+...
diff --git a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
index 17de405..0f28964 100644
--- a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir
+++ b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
@@ -1,6 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=early-tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
-# Check that only the computed goto is not be restrict by tail-dup-pred-size and tail-dup-succ-size.
+#
+# Check that the computed goto, like all other branches, is restricted by tail-dup-pred-size and tail-dup-succ-size.
+#
--- |
@computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
declare i64 @f0()
@@ -30,54 +32,54 @@ tracksRegLiveness: true
body: |
; CHECK-LABEL: name: computed_goto
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64_nosp = COPY [[COPY2]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY4]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY6]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr64_nosp = COPY [[COPY8]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY8]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr64_nosp = PHI [[COPY]], %bb.0, [[COPY4]], %bb.4, [[COPY3]], %bb.3, [[COPY2]], %bb.2, [[COPY1]], %bb.1
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[PHI]], @computed_goto.dispatch, $noreg
bb.0:
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
diff --git a/llvm/test/CodeGen/X86/pr76416.ll b/llvm/test/CodeGen/X86/pr76416.ll
new file mode 100644
index 0000000..68e9ef9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr76416.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;
+; Reproducer from https://github.com/llvm/llvm-project/issues/76416
+;
+
+@load_p = external global ptr, align 8
+@load_data = external global i8, align 1
+
+define dso_local void @pr76416() {
+; CHECK-LABEL: pr76416:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jg .LBB0_3
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_2: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: incl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: .LBB0_3: # %for.end
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq load_p@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movq load_data@GOTPCREL(%rip), %rcx
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_4: # %for.cond1
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movq (%rax), %rdx
+; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: movzbl (%rdx,%rsi), %edx
+; CHECK-NEXT: movb %dl, (%rcx)
+; CHECK-NEXT: leal 1(%rsi), %edx
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_4
+entry:
+ %alloca = alloca i32, align 4
+ store i32 0, ptr %alloca, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %load.from.alloca.0 = load i32, ptr %alloca, align 4
+ %cmp = icmp slt i32 %load.from.alloca.0, 4
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void asm sideeffect "", "{ax},~{dirflag},~{fpsr},~{flags}"(i8 0) nounwind
+ %load.from.alloca.1 = load i32, ptr %alloca, align 4
+ %inc = add nsw i32 %load.from.alloca.1, 1
+ store i32 %inc, ptr %alloca, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ store i32 0, ptr %alloca, align 4
+ br label %for.cond1
+
+for.cond1: ; preds = %for.cond1, %for.end
+ call void asm sideeffect "", "N{dx},~{dirflag},~{fpsr},~{flags}"(i32 poison) nounwind
+ %load.from.load_p = load ptr, ptr @load_p, align 8
+ %regs = getelementptr inbounds { [4 x i8] }, ptr %load.from.load_p, i32 0, i32 0
+ %load.from.alloca.2 = load i32, ptr %alloca, align 4
+ %idxprom = sext i32 %load.from.alloca.2 to i64
+ %arrayidx = getelementptr inbounds [4 x i8], ptr %regs, i64 0, i64 %idxprom
+ %load.with.gep.ptr = load i8, ptr %arrayidx, align 1
+ store i8 %load.with.gep.ptr, ptr @load_data, align 1
+ %load.from.alloca.3 = load i32, ptr %alloca, align 4
+ %inc2 = add nsw i32 %load.from.alloca.3, 1
+ store i32 %inc2, ptr %alloca, align 4
+ br label %for.cond1
+}
diff --git a/llvm/test/CodeGen/X86/subreg-fail.mir b/llvm/test/CodeGen/X86/subreg-fail.mir
index c8146f0..dc69071 100644
--- a/llvm/test/CodeGen/X86/subreg-fail.mir
+++ b/llvm/test/CodeGen/X86/subreg-fail.mir
@@ -14,8 +14,8 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: test1
- ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`)
- ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`)
+ ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm]] :: (volatile load (s32) from `ptr undef`)
+ ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm1]] :: (volatile load (s32) from `ptr undef`)
; CHECK-NEXT: [[MOV32rm1:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32rm1]], 32, implicit-def dead $eflags
; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = LEA64r [[MOV32rm1]], 1, [[MOV32rm]], 256, $noreg
; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = SHR64ri [[LEA64r]], 8, implicit-def dead $eflags
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir b/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir
new file mode 100644
index 0000000..e4fb812
--- /dev/null
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir
@@ -0,0 +1,451 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines
+
+# We cannot lose the liveness of the high subregister of %1 when
+# coalesced with %0, so introduce an implicit-def of the super
+# register on the MOV.
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: subreg_to_reg_folds_to_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $rax
+
+ ; CHECK-LABEL: name: subreg_to_reg_folds_to_undef
+ ; CHECK: liveins: $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rax
+ ; CHECK-NEXT: undef [[MOV32rr:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rr [[COPY]].sub_32bit, implicit-def [[MOV32rr]]
+ ; CHECK-NEXT: RET 0, implicit [[MOV32rr]]
+ %0:gr64 = COPY killed $rax
+ %1:gr32 = COPY killed %0.sub_32bit
+ %2:gr32 = MOV32rr killed %1
+ %3:gr64 = SUBREG_TO_REG 0, killed %2, %subreg.sub_32bit
+ %4:gr64 = COPY killed %3
+ RET 0, implicit %4
+
+...
+
+---
+name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def %0
+ %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def undef [[MOV32r0_]].sub_8bit, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit [[MOV32r0_]]
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags, implicit-def undef %0.sub_8bit
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ INLINEASM &"", 0, implicit %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+
+# Reduced realistic case that triggered an assertion after the new implicit-defs were introduced.
+---
+name: coalesce_needs_implicit_defs
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_needs_implicit_defs
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $rdi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: undef [[MOV32r0_1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_1]], implicit-def [[MOV32r0_1]]
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[MOV32r0_2:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: TEST64rr [[MOV32r0_1]], [[MOV32r0_1]], implicit-def $eflags
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = SETCCr 4, implicit killed $eflags
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32r0_2]], 4, implicit-def dead $eflags
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = ADD64rr [[MOV32r0_2]], [[COPY]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[MOV32r0_1:%[0-9]+]]:gr64_with_sub_8bit = COPY [[MOV32r0_2]]
+ ; CHECK-NEXT: JMP_1 %bb.1
+ bb.0:
+ liveins: $rdi
+
+ %0:gr64 = COPY killed $rdi
+ %1:gr32 = MOV32r0 implicit-def dead $eflags
+ %2:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit
+ %3:gr64 = COPY killed %2
+
+ bb.1:
+ %4:gr64 = COPY killed %3
+ %5:gr32 = MOV32r0 implicit-def dead $eflags
+ TEST64rr killed %4, %4, implicit-def $eflags
+ %6:gr8 = SETCCr 4, implicit killed $eflags
+ %7:gr32 = COPY killed %5
+ %7.sub_8bit:gr32 = COPY killed %6
+ %8:gr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32bit
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %9:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit
+ $rdi = COPY %9
+ CALL64r killed %9, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %10:gr64 = COPY killed %8
+ %10:gr64 = SHL64ri %10, 4, implicit-def dead $eflags
+ %11:gr64 = COPY killed %10
+ %11:gr64 = ADD64rr %11, %0, implicit-def dead $eflags
+ %3:gr64 = COPY killed %11
+ JMP_1 %bb.1
+
+...
+
+# Make sure to add the 'undef' flag to the result register %2,
+# because the top 32 bits are not defined.
+---
+name: coalesce_add_implicitdef_and_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_add_implicitdef_and_undef
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $eflags, $edx
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = ADD32ri [[COPY]].sub_32bit, -34, implicit-def $eflags, implicit-def [[COPY]]
+ ; CHECK-NEXT: FAKE_USE [[COPY]]
+ ; CHECK-NEXT: RET 0
+ bb.0:
+ liveins: $eflags, $edx
+ %0:gr32 = COPY $edx
+ JMP_1 %bb.1
+
+ bb.1:
+ %1:gr32 = COPY %0
+ %1:gr32 = ADD32ri %1, -34, implicit-def $eflags
+ %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %1, %subreg.sub_32bit
+ FAKE_USE %2
+ RET 0
+...
+
+# We can't mark the destination register as 'undef' or add implicit-def
+# because the top 24 bits of %0:gr32 are retained by the SUBREG_TO_REG.
+#
+# For example, if this were to result in:
+#
+# undef %2.sub_32bit:gr64_with_sub_8bit = COPY $edx
+# %1:gr8 = SETCCr 4, implicit $eflags
+# JMP_1 %bb.1
+#
+# bb.1:
+# undef %2.sub_8bit:gr64_with_sub_8bit = COPY %1, implicit-def %2
+#
+# Then this says that the top 56 bits of %2 are undef. That's not correct
+# because only the top 32 bits are undef.
+---
+name: coalesce_dont_add_implicitdef_or_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_dont_add_implicitdef_or_undef
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $eflags, $edx
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx
+ ; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = COPY [[SETCCr]]
+ ; CHECK-NEXT: FAKE_USE [[COPY]]
+ ; CHECK-NEXT: RET 0
+ bb.0:
+ liveins: $eflags, $edx
+ %0:gr32 = COPY $edx
+ %1:gr8 = SETCCr 4, implicit killed $eflags
+ JMP_1 %bb.1
+
+ bb.1:
+ %0.sub_8bit:gr32 = COPY %1
+ %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ FAKE_USE %2
+ RET 0
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ $rdi = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, $eax, %subreg.sub_32bit
+ ; CHECK-NEXT: $rdi = COPY [[SUBREG_TO_REG]]
+ ; CHECK-NEXT: CALL64r [[SUBREG_TO_REG]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ $eax = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed $eax, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+# Coalesced instruction is a copy with other implicit operands
+---
+name: coalesce_copy_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+ ; CHECK-LABEL: name: coalesce_copy_into_subreg_to_reg64
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $eax, implicit-def dead $eflags, implicit-def [[COPY]]
+ ; CHECK-NEXT: $rdi = COPY [[COPY]]
+ ; CHECK-NEXT: CALL64r [[COPY]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = COPY $eax, implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef [[MOV32r0_]].sub_32bit, implicit [[MOV32r0_]].sub_32bit, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: $rdi = COPY [[MOV32r0_]]
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ INLINEASM &"", 0, implicit-def %0, implicit %0
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1
+ ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags
+ ; CHECK-NEXT: JMP_1 %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $rdi = COPY %1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ bb.0:
+ INLINEASM &"", 0, implicit-def %0:gr32
+ JCC_1 %bb.1, 4, implicit undef $eflags
+ JMP_1 %bb.2
+
+ bb.1:
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+ bb.2:
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1
+ ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags
+ ; CHECK-NEXT: JMP_1 %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $rdi = COPY %1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ bb.0:
+
+ INLINEASM &"", 0, implicit-def %0:gr32
+ JCC_1 %bb.1, 4, implicit undef $eflags
+ JMP_1 %bb.2
+
+ bb.1:
+ %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ JMP_1 %bb.1
+
+ bb.2:
+
+...