Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Bitcode/thinlto-alias-addrspacecast.ll | 7
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll | 812
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll | 42
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-combiner-copy.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-licm-sub-loop.ll | 47
-rw-r--r--  llvm/test/CodeGen/AArch64/peephole-and-tst.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir | 1009
-rw-r--r--  llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll | 11
-rw-r--r--  llvm/test/CodeGen/AArch64/tbl-loops.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/trampoline.ll | 6
l---------  llvm/test/CodeGen/AMDGPU/.#llvm.amdgcn.smfmac.gfx950.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/agpr-copy-propagation.mir | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/elf-header-flags-sramecc.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mfma-no-register-aliasing.ll | 106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll | 9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll | 204
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sgpr-copy.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/combine-movc-sub.ll | 12
-rw-r--r--  llvm/test/CodeGen/ARM/extract-bits.ll | 148
-rw-r--r--  llvm/test/CodeGen/ARM/extract-lowbits.ll | 92
-rw-r--r--  llvm/test/CodeGen/ARM/llround-conv.ll | 74
-rw-r--r--  llvm/test/CodeGen/ARM/lround-conv.ll | 46
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-Flag-LargeNumber.ll | 20
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-Flags-LargeNumber.ll | 18
-rw-r--r--  llvm/test/CodeGen/NVPTX/convert-sm103a.ll | 297
-rw-r--r--  llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py | 2
-rw-r--r--  llvm/test/CodeGen/NVPTX/wmma.py | 115
-rw-r--r--  llvm/test/CodeGen/PowerPC/vec-nmsub.ll | 36
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store-fp.ll | 950
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll | 219
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv32.ll | 22
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv64.ll | 22
-rw-r--r--  llvm/test/CodeGen/RISCV/pr69586.ll | 204
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-trampoline.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll | 65
-rw-r--r--  llvm/test/CodeGen/SystemZ/llvm.sincos.ll | 4
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll | 6
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.ll | 151
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll | 80
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-float16regloops.ll | 82
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-float32regloops.ll | 100
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-increment.ll | 278
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-phireg.ll | 30
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll | 519
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-qrintrsplat.ll | 22
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll | 10
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll | 16
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll | 92
-rw-r--r--  llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll | 82
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir | 14
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir | 32
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir | 77
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir | 77
-rw-r--r--  llvm/test/CodeGen/X86/dag-update-nodetomatch.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/delete-dead-instrs-with-live-uses.mir | 4
-rw-r--r--  llvm/test/CodeGen/X86/inalloca-invoke.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/licm-regpressure.ll | 62
-rw-r--r--  llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll | 24
-rw-r--r--  llvm/test/MC/AMDGPU/vop3-gfx9.s | 135
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3.txt | 21
-rw-r--r--  llvm/test/Other/new-pm-lto-defaults.ll | 1
-rw-r--r--  llvm/test/ThinLTO/X86/memprof-basic.ll | 5
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll | 4
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_address_taken.ll | 40
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_no_address_taken.ll | 45
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_lto.ll | 69
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion.ll | 141
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion1.ll | 98
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_multinode_refscc.ll | 41
-rw-r--r--  llvm/test/Transforms/FunctionAttrs/norecurse_self_recursive_callee.ll | 88
-rw-r--r--  llvm/test/Transforms/InstCombine/clamp-to-minmax.ll | 6
-rw-r--r--  llvm/test/Transforms/InstCombine/select-gep.ll | 32
-rw-r--r--  llvm/test/Transforms/InstCombine/select-safe-bool-transforms.ll | 27
-rw-r--r--  llvm/test/Transforms/InstSimplify/domcondition.ll | 207
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/neon-inloop-reductions.ll | 121
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr162009.ll | 79
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/replicating-load-store-costs.ll | 84
-rw-r--r--  llvm/test/Transforms/PGOProfile/memprof.ll | 19
-rw-r--r--  llvm/test/Transforms/SCCP/relax-range-checks.ll | 24
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll | 12
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll | 4
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll | 32
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll | 55
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/indirectbr.ll | 32
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll | 12
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll | 213
-rw-r--r--  llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s | 10
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s | 8
106 files changed, 5525 insertions, 2743 deletions
diff --git a/llvm/test/Bitcode/thinlto-alias-addrspacecast.ll b/llvm/test/Bitcode/thinlto-alias-addrspacecast.ll
new file mode 100644
index 0000000..fe4f05e
--- /dev/null
+++ b/llvm/test/Bitcode/thinlto-alias-addrspacecast.ll
@@ -0,0 +1,7 @@
+; RUN: opt -module-summary < %s | llvm-dis | FileCheck %s
+
+@__oclc_ABI_version = linkonce_odr hidden addrspace(4) constant i32 500, align 4
+@_ZL20__oclc_ABI_version__ = internal alias i32, addrspacecast (ptr addrspace(4) @__oclc_ABI_version to ptr)
+
+; CHECK: ^1 = gv: (name: "__oclc_ABI_version", summaries: (variable: (module: ^0, flags: {{.*}})))
+; CHECK: ^2 = gv: (name: "_ZL20__oclc_ABI_version__", summaries: (alias: (module: ^0, flags: {{.*}}, aliasee: ^1)))
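
Note: the new test above checks that ThinLTO summary emission resolves an alias's aliasee through an addrspacecast constant expression — the alias summary for _ZL20__oclc_ABI_version__ must still reference the variable summary for __oclc_ABI_version (^1) even though the aliasee is spelled as addrspacecast (ptr addrspace(4) ... to ptr). As a sketch, the RUN line can be reproduced by hand with the %s substitution expanded, assuming opt, llvm-dis, and FileCheck from the same build are on PATH (the exact ^N summary slot numbers are whatever the summary writer assigns for this module):

    opt -module-summary llvm/test/Bitcode/thinlto-alias-addrspacecast.ll \
      | llvm-dis \
      | FileCheck llvm/test/Bitcode/thinlto-alias-addrspacecast.ll
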
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
index ed68723..41f7ab8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/split-wide-shifts-multiway.ll
@@ -1219,14 +1219,14 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
;
; GISEL-LABEL: test_shl_i1024:
; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: sub sp, sp, #416
-; GISEL-NEXT: stp x28, x27, [sp, #320] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x26, x25, [sp, #336] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x24, x23, [sp, #352] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x22, x21, [sp, #368] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x20, x19, [sp, #384] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x29, x30, [sp, #400] ; 16-byte Folded Spill
-; GISEL-NEXT: .cfi_def_cfa_offset 416
+; GISEL-NEXT: sub sp, sp, #432
+; GISEL-NEXT: stp x28, x27, [sp, #336] ; 16-byte Folded Spill
+; GISEL-NEXT: stp x26, x25, [sp, #352] ; 16-byte Folded Spill
+; GISEL-NEXT: stp x24, x23, [sp, #368] ; 16-byte Folded Spill
+; GISEL-NEXT: stp x22, x21, [sp, #384] ; 16-byte Folded Spill
+; GISEL-NEXT: stp x20, x19, [sp, #400] ; 16-byte Folded Spill
+; GISEL-NEXT: stp x29, x30, [sp, #416] ; 16-byte Folded Spill
+; GISEL-NEXT: .cfi_def_cfa_offset 432
; GISEL-NEXT: .cfi_offset w30, -8
; GISEL-NEXT: .cfi_offset w29, -16
; GISEL-NEXT: .cfi_offset w19, -24
@@ -1242,38 +1242,44 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: ldp x10, x11, [x1]
; GISEL-NEXT: mov w8, w2
; GISEL-NEXT: lsr x9, x8, #6
-; GISEL-NEXT: and x16, x8, #0x3f
+; GISEL-NEXT: and x12, x8, #0x3f
+; GISEL-NEXT: str x0, [sp, #144] ; 8-byte Folded Spill
+; GISEL-NEXT: and x14, x8, #0x3f
; GISEL-NEXT: mov w13, #64 ; =0x40
-; GISEL-NEXT: sub x21, x13, x16
-; GISEL-NEXT: str x0, [sp, #112] ; 8-byte Folded Spill
-; GISEL-NEXT: mov x24, x16
-; GISEL-NEXT: lsl x25, x10, x16
+; GISEL-NEXT: and x16, x8, #0x3f
+; GISEL-NEXT: lsl x0, x10, x12
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: lsr x26, x10, x21
-; GISEL-NEXT: lsl x2, x11, x16
-; GISEL-NEXT: lsr x23, x11, x21
-; GISEL-NEXT: mov x22, x21
-; GISEL-NEXT: csel x12, x25, xzr, eq
+; GISEL-NEXT: sub x2, x13, x14
+; GISEL-NEXT: lsr x3, x10, x2
+; GISEL-NEXT: lsl x6, x11, x14
+; GISEL-NEXT: and x14, x8, #0x3f
+; GISEL-NEXT: csel x12, x0, xzr, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: str x1, [sp, #312] ; 8-byte Folded Spill
+; GISEL-NEXT: lsr x20, x11, x2
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: str x23, [sp, #208] ; 8-byte Folded Spill
+; GISEL-NEXT: mov x24, x0
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: stp x24, x22, [sp, #40] ; 16-byte Folded Spill
+; GISEL-NEXT: mov x7, x3
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #4
+; GISEL-NEXT: mov x28, x1
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #5
+; GISEL-NEXT: and x21, x8, #0x3f
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #6
+; GISEL-NEXT: str x6, [sp, #24] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #7
+; GISEL-NEXT: str x28, [sp, #304] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #8
+; GISEL-NEXT: str x7, [sp, #272] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #9
+; GISEL-NEXT: str x20, [sp, #112] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #10
; GISEL-NEXT: csel x12, xzr, x12, eq
@@ -1290,13 +1296,13 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x10, x10, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x10, [sp, #192] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x10, xzr, x26, eq
+; GISEL-NEXT: str x10, [sp, #232] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x10, xzr, x3, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x10, x2, x10
+; GISEL-NEXT: orr x10, x6, x10
; GISEL-NEXT: csel x10, x10, xzr, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: csel x10, x25, x10, eq
+; GISEL-NEXT: csel x10, x0, x10, eq
; GISEL-NEXT: cmp x9, #2
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #3
@@ -1327,25 +1333,24 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x13, xzr, x13, eq
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: lsl x20, x12, x16
+; GISEL-NEXT: lsl x26, x12, x14
; GISEL-NEXT: csel x11, x11, x13, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #184] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x11, xzr, x23, eq
+; GISEL-NEXT: str x11, [sp, #224] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x11, xzr, x20, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x20, x11
-; GISEL-NEXT: lsr x15, x12, x21
-; GISEL-NEXT: lsl x14, x10, x16
+; GISEL-NEXT: orr x11, x26, x11
+; GISEL-NEXT: lsr x15, x12, x2
+; GISEL-NEXT: lsl x30, x10, x16
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: lsr x17, x10, x21
-; GISEL-NEXT: csel x13, xzr, x26, eq
+; GISEL-NEXT: lsr x17, x10, x2
+; GISEL-NEXT: csel x13, xzr, x3, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: str x20, [sp, #8] ; 8-byte Folded Spill
-; GISEL-NEXT: orr x13, x2, x13
+; GISEL-NEXT: orr x13, x6, x13
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: csel x11, x0, x11, eq
; GISEL-NEXT: cmp x9, #3
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #4
@@ -1375,23 +1380,23 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #176] ; 8-byte Folded Spill
+; GISEL-NEXT: str x11, [sp, #216] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, xzr, x15, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x14, x11
+; GISEL-NEXT: orr x11, x30, x11
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x23, eq
+; GISEL-NEXT: csel x12, xzr, x20, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x12, x20, x12
+; GISEL-NEXT: orr x12, x26, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x26, eq
+; GISEL-NEXT: csel x12, xzr, x3, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: orr x12, x6, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: csel x11, x0, x11, eq
; GISEL-NEXT: cmp x9, #4
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #5
@@ -1421,33 +1426,33 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: lsl x0, x12, x16
; GISEL-NEXT: csel x10, x10, x13, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x10, [sp, #168] ; 8-byte Folded Spill
+; GISEL-NEXT: str x10, [sp, #208] ; 8-byte Folded Spill
; GISEL-NEXT: csel x10, xzr, x17, eq
; GISEL-NEXT: cmp x9, #0
; GISEL-NEXT: orr x10, x0, x10
-; GISEL-NEXT: lsr x27, x12, x21
+; GISEL-NEXT: lsr x4, x12, x2
; GISEL-NEXT: lsl x19, x11, x16
; GISEL-NEXT: csel x10, x10, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: lsr x3, x11, x21
+; GISEL-NEXT: mov x16, x15
; GISEL-NEXT: csel x13, xzr, x15, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: stp x27, x0, [sp, #240] ; 16-byte Folded Spill
-; GISEL-NEXT: orr x13, x14, x13
-; GISEL-NEXT: mov x7, x3
+; GISEL-NEXT: str x4, [sp, #248] ; 8-byte Folded Spill
+; GISEL-NEXT: orr x13, x30, x13
+; GISEL-NEXT: str x0, [sp, #48] ; 8-byte Folded Spill
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x23, eq
+; GISEL-NEXT: csel x13, xzr, x20, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x13, x20, x13
+; GISEL-NEXT: orr x13, x26, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x26, eq
+; GISEL-NEXT: csel x13, xzr, x3, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x13, x2, x13
+; GISEL-NEXT: orr x13, x6, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: csel x10, x25, x10, eq
+; GISEL-NEXT: csel x10, x24, x10, eq
; GISEL-NEXT: cmp x9, #5
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #6
@@ -1473,8 +1478,8 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x10, x12, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x10, [sp, #160] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x10, xzr, x27, eq
+; GISEL-NEXT: str x10, [sp, #200] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x10, xzr, x4, eq
; GISEL-NEXT: cmp x9, #0
; GISEL-NEXT: orr x10, x19, x10
; GISEL-NEXT: csel x10, x10, xzr, eq
@@ -1486,20 +1491,22 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x12, xzr, x15, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x12, x14, x12
+; GISEL-NEXT: and x15, x8, #0x3f
+; GISEL-NEXT: orr x12, x30, x12
; GISEL-NEXT: csel x10, x12, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x23, eq
+; GISEL-NEXT: csel x12, xzr, x20, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x12, x20, x12
+; GISEL-NEXT: orr x12, x26, x12
; GISEL-NEXT: csel x10, x12, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x26, eq
+; GISEL-NEXT: csel x12, xzr, x3, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: lsr x3, x11, x2
+; GISEL-NEXT: orr x12, x6, x12
; GISEL-NEXT: csel x10, x12, x10, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: csel x10, x25, x10, eq
+; GISEL-NEXT: csel x10, x24, x10, eq
; GISEL-NEXT: cmp x9, #6
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #7
@@ -1522,21 +1529,23 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x13, xzr, x13, eq
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: lsl x4, x12, x16
+; GISEL-NEXT: lsl x22, x12, x15
; GISEL-NEXT: csel x11, x11, x13, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #152] ; 8-byte Folded Spill
+; GISEL-NEXT: str x11, [sp, #192] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, xzr, x3, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x4, x11
-; GISEL-NEXT: lsl x30, x10, x16
-; GISEL-NEXT: lsr x28, x10, x21
+; GISEL-NEXT: orr x11, x22, x11
+; GISEL-NEXT: lsl x5, x10, x15
+; GISEL-NEXT: lsr x27, x10, x2
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x27, eq
+; GISEL-NEXT: csel x13, xzr, x4, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: str x30, [sp, #200] ; 8-byte Folded Spill
+; GISEL-NEXT: mov x25, x27
; GISEL-NEXT: orr x13, x19, x13
+; GISEL-NEXT: mov x14, x5
+; GISEL-NEXT: str x27, [sp, #328] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x13, xzr, x17, eq
@@ -1544,30 +1553,29 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: orr x13, x0, x13
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x15, eq
+; GISEL-NEXT: csel x13, xzr, x16, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x13, x14, x13
+; GISEL-NEXT: orr x13, x30, x13
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x23, eq
+; GISEL-NEXT: csel x13, xzr, x20, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x13, x20, x13
+; GISEL-NEXT: orr x13, x26, x13
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x26, eq
+; GISEL-NEXT: csel x13, xzr, x7, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x13, x2, x13
+; GISEL-NEXT: orr x13, x6, x13
; GISEL-NEXT: csel x11, x13, x11, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: lsr x13, x12, x21
-; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: lsr x13, x12, x2
+; GISEL-NEXT: csel x11, x24, x11, eq
; GISEL-NEXT: cmp x9, #7
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: mov x6, x13
+; GISEL-NEXT: mov x15, x13
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: str x6, [sp, #256] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #10
; GISEL-NEXT: csel x11, xzr, x11, eq
@@ -1584,18 +1592,18 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #144] ; 8-byte Folded Spill
+; GISEL-NEXT: str x11, [sp, #184] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, xzr, x13, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x30, x11
+; GISEL-NEXT: orr x11, x5, x11
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x12, xzr, x3, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x12, x4, x12
+; GISEL-NEXT: orr x12, x22, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x27, eq
+; GISEL-NEXT: csel x12, xzr, x4, eq
; GISEL-NEXT: cmp x9, #2
; GISEL-NEXT: orr x12, x19, x12
; GISEL-NEXT: csel x11, x12, x11, eq
@@ -1605,22 +1613,22 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: orr x12, x0, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x15, eq
+; GISEL-NEXT: csel x12, xzr, x16, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x12, x14, x12
+; GISEL-NEXT: orr x12, x30, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x23, eq
+; GISEL-NEXT: csel x12, xzr, x20, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x12, x20, x12
+; GISEL-NEXT: orr x12, x26, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x26, eq
+; GISEL-NEXT: csel x12, xzr, x7, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: orr x12, x6, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: csel x11, x24, x11, eq
; GISEL-NEXT: cmp x9, #8
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #9
@@ -1635,39 +1643,34 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #14
; GISEL-NEXT: csel x12, xzr, x11, eq
-; GISEL-NEXT: ldp x11, x5, [x1, #64]
+; GISEL-NEXT: ldp x11, x1, [x1, #64]
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x12, x10, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: lsl x21, x11, x16
-; GISEL-NEXT: str x12, [sp, #136] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x12, xzr, x28, eq
+; GISEL-NEXT: lsl x23, x11, x21
+; GISEL-NEXT: str x12, [sp, #176] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x12, xzr, x27, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x12, x21, x12
-; GISEL-NEXT: lsr x10, x11, x22
-; GISEL-NEXT: mov x16, x19
+; GISEL-NEXT: orr x12, x23, x12
+; GISEL-NEXT: lsr x21, x11, x2
+; GISEL-NEXT: str x23, [sp, #288] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, x12, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: mov x1, x16
; GISEL-NEXT: csel x13, xzr, x13, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: str x16, [sp, #304] ; 8-byte Folded Spill
-; GISEL-NEXT: orr x13, x30, x13
+; GISEL-NEXT: orr x13, x5, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x13, xzr, x3, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: lsl x3, x5, x24
-; GISEL-NEXT: orr x13, x4, x13
+; GISEL-NEXT: orr x13, x22, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: stp x21, x3, [sp, #216] ; 16-byte Folded Spill
-; GISEL-NEXT: csel x13, xzr, x27, eq
+; GISEL-NEXT: csel x13, xzr, x4, eq
; GISEL-NEXT: cmp x9, #3
; GISEL-NEXT: orr x13, x19, x13
-; GISEL-NEXT: mov x19, x28
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x13, xzr, x17, eq
@@ -1675,27 +1678,30 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: orr x13, x0, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x15, eq
+; GISEL-NEXT: csel x13, xzr, x16, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x13, x14, x13
+; GISEL-NEXT: orr x13, x30, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x23, eq
+; GISEL-NEXT: csel x13, xzr, x20, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x13, x20, x13
+; GISEL-NEXT: orr x13, x26, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x26, eq
+; GISEL-NEXT: csel x13, xzr, x7, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: orr x13, x2, x13
+; GISEL-NEXT: orr x13, x6, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: csel x12, x25, x12, eq
+; GISEL-NEXT: and x13, x8, #0x3f
+; GISEL-NEXT: csel x12, x24, x12, eq
; GISEL-NEXT: cmp x9, #9
+; GISEL-NEXT: lsl x10, x1, x13
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #10
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #11
+; GISEL-NEXT: stp x10, x15, [sp, #312] ; 16-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #12
; GISEL-NEXT: csel x12, xzr, x12, eq
@@ -1708,69 +1714,69 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x11, x11, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #128] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x11, xzr, x10, eq
+; GISEL-NEXT: str x11, [sp, #168] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x11, xzr, x21, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x3, x11
+; GISEL-NEXT: orr x11, x10, x11
+; GISEL-NEXT: mov x10, x23
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x28, eq
+; GISEL-NEXT: csel x12, xzr, x27, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: mov x28, x4
-; GISEL-NEXT: orr x12, x21, x12
-; GISEL-NEXT: str x28, [sp, #32] ; 8-byte Folded Spill
+; GISEL-NEXT: mov x27, x24
+; GISEL-NEXT: orr x12, x23, x12
+; GISEL-NEXT: mov x23, x15
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x6, eq
+; GISEL-NEXT: csel x12, xzr, x15, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x12, x30, x12
+; GISEL-NEXT: mov x15, x22
+; GISEL-NEXT: orr x12, x5, x12
+; GISEL-NEXT: mov x5, x3
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x7, eq
+; GISEL-NEXT: stp x14, x5, [sp, #256] ; 16-byte Folded Spill
+; GISEL-NEXT: csel x12, xzr, x3, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x12, x4, x12
-; GISEL-NEXT: mov x4, x20
+; GISEL-NEXT: mov x5, x4
+; GISEL-NEXT: orr x12, x22, x12
+; GISEL-NEXT: lsr x22, x1, x2
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x27, eq
+; GISEL-NEXT: csel x12, xzr, x4, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: mov x27, x2
-; GISEL-NEXT: orr x12, x16, x12
-; GISEL-NEXT: mov x16, x17
+; GISEL-NEXT: str x22, [sp, #240] ; 8-byte Folded Spill
+; GISEL-NEXT: orr x12, x19, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x12, xzr, x17, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: mov x17, x15
; GISEL-NEXT: orr x12, x0, x12
-; GISEL-NEXT: lsr x0, x5, x22
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x15, eq
+; GISEL-NEXT: csel x12, xzr, x16, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: ldr x15, [sp, #312] ; 8-byte Folded Reload
-; GISEL-NEXT: orr x12, x14, x12
-; GISEL-NEXT: str x0, [sp, #280] ; 8-byte Folded Spill
+; GISEL-NEXT: orr x12, x30, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x23, eq
+; GISEL-NEXT: csel x12, xzr, x20, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: mov x23, x25
-; GISEL-NEXT: orr x12, x20, x12
-; GISEL-NEXT: str x23, [sp, #288] ; 8-byte Folded Spill
+; GISEL-NEXT: orr x12, x26, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x26, eq
+; GISEL-NEXT: csel x12, xzr, x7, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: orr x12, x2, x12
-; GISEL-NEXT: mov x2, x3
+; GISEL-NEXT: mov x7, x14
+; GISEL-NEXT: orr x12, x6, x12
+; GISEL-NEXT: mov x6, x28
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: csel x11, x24, x11, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: mov x25, x26
+; GISEL-NEXT: ldr x24, [x6, #88]
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #11
+; GISEL-NEXT: ldr x6, [sp, #272] ; 8-byte Folded Reload
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #12
; GISEL-NEXT: csel x11, xzr, x11, eq
@@ -1780,80 +1786,84 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x12, xzr, x11, eq
+; GISEL-NEXT: ldr x11, [x28, #80]
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: csel x12, x5, x12, eq
-; GISEL-NEXT: ldp x11, x5, [x15, #80]
+; GISEL-NEXT: csel x12, x1, x12, eq
+; GISEL-NEXT: mov x28, x2
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x12, [sp, #120] ; 8-byte Folded Spill
-; GISEL-NEXT: mov x15, x7
-; GISEL-NEXT: csel x12, xzr, x0, eq
+; GISEL-NEXT: lsl x2, x11, x13
+; GISEL-NEXT: str x12, [sp, #160] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x12, xzr, x22, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: str x15, [sp, #24] ; 8-byte Folded Spill
-; GISEL-NEXT: lsl x20, x11, x24
-; GISEL-NEXT: orr x12, x20, x12
-; GISEL-NEXT: str x20, [sp, #232] ; 8-byte Folded Spill
+; GISEL-NEXT: ldr x1, [sp, #312] ; 8-byte Folded Reload
+; GISEL-NEXT: str x28, [sp, #16] ; 8-byte Folded Spill
+; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: str x2, [sp, #280] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, x12, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x10, eq
+; GISEL-NEXT: csel x13, xzr, x21, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x13, x3, x13
-; GISEL-NEXT: lsl x3, x5, x24
+; GISEL-NEXT: orr x13, x1, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x19, eq
+; GISEL-NEXT: csel x13, xzr, x25, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: stp x19, x3, [sp, #264] ; 16-byte Folded Spill
-; GISEL-NEXT: orr x13, x21, x13
+; GISEL-NEXT: mov x25, x16
+; GISEL-NEXT: orr x13, x10, x13
+; GISEL-NEXT: mov x10, x30
+; GISEL-NEXT: str x25, [sp, #80] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x6, eq
+; GISEL-NEXT: csel x13, xzr, x23, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x13, x30, x13
+; GISEL-NEXT: mov x23, x3
+; GISEL-NEXT: orr x13, x14, x13
+; GISEL-NEXT: mov x14, x17
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x7, eq
-; GISEL-NEXT: ldp x7, x30, [sp, #240] ; 16-byte Folded Reload
+; GISEL-NEXT: stp x19, x14, [sp, #64] ; 16-byte Folded Spill
+; GISEL-NEXT: csel x13, xzr, x3, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x13, x28, x13
+; GISEL-NEXT: mov x3, x21
+; GISEL-NEXT: orr x13, x15, x13
+; GISEL-NEXT: str x3, [sp, #32] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x7, eq
+; GISEL-NEXT: csel x13, xzr, x4, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x13, x1, x13
-; GISEL-NEXT: mov x1, x14
+; GISEL-NEXT: mov x4, x0
+; GISEL-NEXT: orr x13, x19, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x16, eq
+; GISEL-NEXT: csel x13, xzr, x17, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x13, x30, x13
+; GISEL-NEXT: mov x17, x27
+; GISEL-NEXT: orr x13, x0, x13
+; GISEL-NEXT: ldr x0, [sp, #24] ; 8-byte Folded Reload
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x17, eq
+; GISEL-NEXT: csel x13, xzr, x16, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: orr x13, x14, x13
-; GISEL-NEXT: ldr x14, [sp, #208] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x13, x30, x13
+; GISEL-NEXT: ldp x30, x16, [sp, #320] ; 16-byte Folded Reload
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x14, eq
+; GISEL-NEXT: csel x13, xzr, x20, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: orr x13, x4, x13
-; GISEL-NEXT: mov x4, x10
+; GISEL-NEXT: orr x13, x26, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x26, eq
+; GISEL-NEXT: csel x13, xzr, x6, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: mov x26, x27
-; GISEL-NEXT: orr x13, x27, x13
-; GISEL-NEXT: lsr x27, x11, x22
+; GISEL-NEXT: orr x13, x0, x13
; GISEL-NEXT: csel x12, x13, x12, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: mov x13, x23
-; GISEL-NEXT: csel x12, x23, x12, eq
+; GISEL-NEXT: lsr x13, x11, x28
+; GISEL-NEXT: csel x12, x27, x12, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: str x27, [sp, #64] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #12
-; GISEL-NEXT: mov x23, x20
+; GISEL-NEXT: str x13, [sp, #96] ; 8-byte Folded Spill
; GISEL-NEXT: csel x12, xzr, x12, eq
; GISEL-NEXT: cmp x9, #13
; GISEL-NEXT: csel x12, xzr, x12, eq
@@ -1864,71 +1874,77 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x11, x11, x12, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x11, [sp, #104] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x11, xzr, x27, eq
+; GISEL-NEXT: str x11, [sp, #152] ; 8-byte Folded Spill
+; GISEL-NEXT: and x11, x8, #0x3f
+; GISEL-NEXT: lsl x27, x24, x11
+; GISEL-NEXT: csel x11, xzr, x13, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x11, x3, x11
+; GISEL-NEXT: orr x11, x27, x11
+; GISEL-NEXT: str x27, [sp, #56] ; 8-byte Folded Spill
; GISEL-NEXT: csel x11, x11, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x0, eq
+; GISEL-NEXT: csel x12, xzr, x22, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: mov x0, x7
-; GISEL-NEXT: orr x12, x20, x12
-; GISEL-NEXT: mov x20, x16
+; GISEL-NEXT: mov x22, x2
+; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: mov x2, x14
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x10, eq
+; GISEL-NEXT: csel x12, xzr, x21, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: ldr x10, [sp, #312] ; 8-byte Folded Reload
-; GISEL-NEXT: orr x12, x2, x12
-; GISEL-NEXT: ldr x2, [sp, #304] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x21, [sp, #288] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x12, x1, x12
+; GISEL-NEXT: mov x1, x27
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x19, eq
+; GISEL-NEXT: csel x12, xzr, x16, eq
; GISEL-NEXT: cmp x9, #3
; GISEL-NEXT: orr x12, x21, x12
-; GISEL-NEXT: ldr x21, [sp, #200] ; 8-byte Folded Reload
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x6, eq
+; GISEL-NEXT: csel x12, xzr, x30, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x12, x21, x12
+; GISEL-NEXT: orr x12, x7, x12
+; GISEL-NEXT: mov x7, x15
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x15, eq
+; GISEL-NEXT: str x7, [sp, #40] ; 8-byte Folded Spill
+; GISEL-NEXT: csel x12, xzr, x23, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x12, x28, x12
+; GISEL-NEXT: orr x12, x15, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x7, eq
+; GISEL-NEXT: csel x12, xzr, x5, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: mov x7, x17
-; GISEL-NEXT: orr x12, x2, x12
+; GISEL-NEXT: mov x5, x19
+; GISEL-NEXT: orr x12, x19, x12
+; GISEL-NEXT: mov x19, x7
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x16, eq
+; GISEL-NEXT: csel x12, xzr, x14, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: orr x12, x30, x12
+; GISEL-NEXT: lsr x14, x24, x28
+; GISEL-NEXT: orr x12, x4, x12
+; GISEL-NEXT: mov x4, x10
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x17, eq
+; GISEL-NEXT: csel x12, xzr, x25, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: mov x17, x24
-; GISEL-NEXT: orr x12, x1, x12
+; GISEL-NEXT: orr x12, x10, x12
+; GISEL-NEXT: ldr x10, [sp, #304] ; 8-byte Folded Reload
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x14, eq
-; GISEL-NEXT: ldr x14, [sp, #8] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x12, xzr, x20, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: orr x12, x14, x12
+; GISEL-NEXT: orr x12, x26, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x12, xzr, x25, eq
+; GISEL-NEXT: csel x12, xzr, x6, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: orr x12, x26, x12
+; GISEL-NEXT: orr x12, x0, x12
; GISEL-NEXT: csel x11, x12, x11, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: csel x11, x13, x11, eq
+; GISEL-NEXT: csel x11, x17, x11, eq
; GISEL-NEXT: cmp x9, #12
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #13
@@ -1937,393 +1953,395 @@ define void @test_shl_i1024(ptr %result, ptr %input, i32 %shift) {
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x12, xzr, x11, eq
+; GISEL-NEXT: ldp x11, x6, [x10, #96]
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: ldp x11, x10, [x10, #96]
-; GISEL-NEXT: csel x12, x5, x12, eq
-; GISEL-NEXT: str x12, [sp, #96] ; 8-byte Folded Spill
-; GISEL-NEXT: mov x12, x22
-; GISEL-NEXT: lsr x22, x5, x22
-; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: mov x5, x27
-; GISEL-NEXT: lsl x24, x11, x24
-; GISEL-NEXT: str x10, [sp, #296] ; 8-byte Folded Spill
-; GISEL-NEXT: csel x10, xzr, x22, eq
+; GISEL-NEXT: and x10, x8, #0x3f
+; GISEL-NEXT: csel x12, x24, x12, eq
+; GISEL-NEXT: tst x8, #0x3f
+; GISEL-NEXT: ldr x24, [sp, #248] ; 8-byte Folded Reload
+; GISEL-NEXT: lsl x15, x11, x10
+; GISEL-NEXT: csel x10, xzr, x14, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: str x22, [sp, #16] ; 8-byte Folded Spill
-; GISEL-NEXT: orr x10, x24, x10
+; GISEL-NEXT: str x12, [sp, #136] ; 8-byte Folded Spill
+; GISEL-NEXT: ldr x12, [sp, #312] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x10, x15, x10
+; GISEL-NEXT: str x15, [sp, #296] ; 8-byte Folded Spill
+; GISEL-NEXT: mov x15, x13
; GISEL-NEXT: csel x10, x10, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x27, eq
+; GISEL-NEXT: csel x13, xzr, x13, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: ldr x27, [sp, #280] ; 8-byte Folded Reload
-; GISEL-NEXT: orr x13, x3, x13
-; GISEL-NEXT: mov x3, x26
+; GISEL-NEXT: orr x13, x27, x13
+; GISEL-NEXT: ldr x27, [sp, #240] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x13, xzr, x27, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x13, x23, x13
-; GISEL-NEXT: mov x23, x4
+; GISEL-NEXT: orr x13, x22, x13
+; GISEL-NEXT: ldr x22, [sp, #272] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x4, eq
-; GISEL-NEXT: ldp x4, x16, [sp, #216] ; 16-byte Folded Reload
+; GISEL-NEXT: csel x13, xzr, x3, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x13, x16, x13
+; GISEL-NEXT: orr x13, x12, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x19, eq
+; GISEL-NEXT: csel x13, xzr, x16, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: mov x19, x1
-; GISEL-NEXT: orr x13, x4, x13
+; GISEL-NEXT: mov x16, x17
+; GISEL-NEXT: orr x13, x21, x13
+; GISEL-NEXT: ldp x23, x21, [sp, #256] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x6, eq
+; GISEL-NEXT: csel x13, xzr, x30, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: mov x6, x14
-; GISEL-NEXT: orr x13, x21, x13
+; GISEL-NEXT: mov x30, x0
+; GISEL-NEXT: orr x13, x23, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x15, eq
+; GISEL-NEXT: csel x13, xzr, x21, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x13, x28, x13
+; GISEL-NEXT: orr x13, x7, x13
+; GISEL-NEXT: mov x7, x14
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x0, eq
+; GISEL-NEXT: csel x13, xzr, x24, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: mov x0, x23
-; GISEL-NEXT: orr x13, x2, x13
+; GISEL-NEXT: orr x13, x5, x13
+; GISEL-NEXT: ldr x5, [sp, #48] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x20, eq
+; GISEL-NEXT: csel x13, xzr, x2, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: orr x13, x30, x13
-; GISEL-NEXT: ldr x30, [sp, #208] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x2, [sp, #296] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x13, x5, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x7, eq
+; GISEL-NEXT: csel x13, xzr, x25, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: orr x13, x1, x13
+; GISEL-NEXT: mov x25, x6
+; GISEL-NEXT: orr x13, x4, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x30, eq
+; GISEL-NEXT: csel x13, xzr, x20, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: orr x13, x14, x13
-; GISEL-NEXT: ldp x14, x2, [sp, #264] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x13, x26, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x13, xzr, x25, eq
+; GISEL-NEXT: csel x13, xzr, x22, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: orr x13, x26, x13
-; GISEL-NEXT: ldr x26, [sp, #288] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x13, x0, x13
; GISEL-NEXT: csel x10, x13, x10, eq
; GISEL-NEXT: cmp x9, #12
-; GISEL-NEXT: lsr x13, x11, x12
-; GISEL-NEXT: csel x10, x26, x10, eq
+; GISEL-NEXT: lsr x13, x11, x28
+; GISEL-NEXT: csel x10, x17, x10, eq
; GISEL-NEXT: cmp x9, #13
+; GISEL-NEXT: ldr x17, [sp, #80] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #14
-; GISEL-NEXT: str x13, [sp, #72] ; 8-byte Folded Spill
+; GISEL-NEXT: str x13, [sp, #104] ; 8-byte Folded Spill
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x8, #0
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: str x10, [sp, #88] ; 8-byte Folded Spill
-; GISEL-NEXT: ldr x10, [sp, #296] ; 8-byte Folded Reload
-; GISEL-NEXT: lsl x11, x10, x17
+; GISEL-NEXT: str x10, [sp, #128] ; 8-byte Folded Spill
+; GISEL-NEXT: and x10, x8, #0x3f
+; GISEL-NEXT: lsl x11, x6, x10
; GISEL-NEXT: csel x10, xzr, x13, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: ldr x17, [sp, #232] ; 8-byte Folded Reload
-; GISEL-NEXT: ldr x13, [sp, #256] ; 8-byte Folded Reload
+; GISEL-NEXT: ldp x0, x13, [sp, #280] ; 16-byte Folded Reload
+; GISEL-NEXT: mov x6, x16
; GISEL-NEXT: orr x10, x11, x10
-; GISEL-NEXT: str x11, [sp, #56] ; 8-byte Folded Spill
+; GISEL-NEXT: str x11, [sp, #88] ; 8-byte Folded Spill
; GISEL-NEXT: csel x10, x10, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x22, eq
+; GISEL-NEXT: csel x11, xzr, x14, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x11, x24, x11
+; GISEL-NEXT: orr x11, x2, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x5, eq
+; GISEL-NEXT: csel x11, xzr, x15, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x11, x2, x11
-; GISEL-NEXT: ldp x12, x5, [sp, #240] ; 16-byte Folded Reload
+; GISEL-NEXT: mov x15, x3
+; GISEL-NEXT: orr x11, x1, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x11, xzr, x27, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: mov x27, x30
-; GISEL-NEXT: orr x11, x17, x11
+; GISEL-NEXT: orr x11, x0, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x23, eq
+; GISEL-NEXT: csel x11, xzr, x3, eq
+; GISEL-NEXT: ldp x14, x3, [sp, #320] ; 16-byte Folded Reload
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: mov x23, x20
-; GISEL-NEXT: orr x11, x16, x11
-; GISEL-NEXT: ldr x16, [sp, #304] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x11, x12, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x14, eq
+; GISEL-NEXT: csel x11, xzr, x3, eq
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x11, x4, x11
+; GISEL-NEXT: orr x11, x13, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x13, eq
+; GISEL-NEXT: csel x11, xzr, x14, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x11, x21, x11
-; GISEL-NEXT: ldr x21, [sp, #296] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x11, x23, x11
+; GISEL-NEXT: mov x23, x5
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x15, eq
+; GISEL-NEXT: csel x11, xzr, x21, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: orr x11, x28, x11
+; GISEL-NEXT: mov x21, x4
+; GISEL-NEXT: orr x11, x19, x11
+; GISEL-NEXT: ldp x12, x19, [sp, #64] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x12, eq
+; GISEL-NEXT: csel x11, xzr, x24, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: orr x11, x16, x11
+; GISEL-NEXT: orr x11, x12, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x20, eq
+; GISEL-NEXT: csel x11, xzr, x19, eq
; GISEL-NEXT: cmp x9, #9
; GISEL-NEXT: orr x11, x5, x11
+; GISEL-NEXT: mov x5, x30
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x7, eq
+; GISEL-NEXT: csel x11, xzr, x17, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: orr x11, x1, x11
-; GISEL-NEXT: ldr x1, [sp, #312] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x11, x4, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x30, eq
+; GISEL-NEXT: csel x11, xzr, x20, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: orr x11, x6, x11
+; GISEL-NEXT: orr x11, x26, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x25, eq
+; GISEL-NEXT: csel x11, xzr, x22, eq
; GISEL-NEXT: cmp x9, #12
-; GISEL-NEXT: orr x11, x3, x11
+; GISEL-NEXT: orr x11, x30, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: cmp x9, #13
-; GISEL-NEXT: csel x10, x26, x10, eq
+; GISEL-NEXT: csel x10, x16, x10, eq
; GISEL-NEXT: cmp x9, #14
+; GISEL-NEXT: ldr x16, [sp, #304] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, xzr, x10, eq
; GISEL-NEXT: cmp x9, #15
; GISEL-NEXT: csel x11, xzr, x10, eq
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: csel x11, x21, x11, eq
-; GISEL-NEXT: ldp x10, x20, [x1, #112]
-; GISEL-NEXT: str x11, [sp, #80] ; 8-byte Folded Spill
-; GISEL-NEXT: ldp x11, x4, [sp, #40] ; 16-byte Folded Reload
+; GISEL-NEXT: ldp x10, x4, [x16, #112]
+; GISEL-NEXT: csel x11, x25, x11, eq
+; GISEL-NEXT: str x11, [sp, #120] ; 8-byte Folded Spill
+; GISEL-NEXT: lsr x11, x25, x28
+; GISEL-NEXT: and x16, x8, #0x3f
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: lsr x21, x21, x4
-; GISEL-NEXT: lsl x28, x10, x11
-; GISEL-NEXT: csel x1, xzr, x21, eq
-; GISEL-NEXT: str x21, [sp, #296] ; 8-byte Folded Spill
+; GISEL-NEXT: ldr x25, [sp, #88] ; 8-byte Folded Reload
+; GISEL-NEXT: lsl x24, x10, x16
+; GISEL-NEXT: csel x1, xzr, x11, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: orr x1, x28, x1
-; GISEL-NEXT: ldr x21, [sp, #72] ; 8-byte Folded Reload
-; GISEL-NEXT: str x28, [sp, #312] ; 8-byte Folded Spill
+; GISEL-NEXT: ldp x16, x28, [sp, #96] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x1, x24, x1
; GISEL-NEXT: csel x1, x1, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: ldr x28, [sp, #56] ; 8-byte Folded Reload
-; GISEL-NEXT: csel x30, xzr, x21, eq
+; GISEL-NEXT: csel x30, xzr, x28, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x30, x28, x30
+; GISEL-NEXT: orr x30, x25, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x22, eq
+; GISEL-NEXT: csel x30, xzr, x7, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: ldr x22, [sp, #64] ; 8-byte Folded Reload
-; GISEL-NEXT: orr x30, x24, x30
+; GISEL-NEXT: orr x30, x2, x30
+; GISEL-NEXT: ldr x2, [sp, #56] ; 8-byte Folded Reload
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x22, eq
+; GISEL-NEXT: csel x30, xzr, x16, eq
; GISEL-NEXT: cmp x9, #3
; GISEL-NEXT: orr x30, x2, x30
-; GISEL-NEXT: ldr x2, [sp, #280] ; 8-byte Folded Reload
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x2, eq
+; GISEL-NEXT: csel x30, xzr, x27, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x30, x17, x30
-; GISEL-NEXT: ldr x17, [sp, #224] ; 8-byte Folded Reload
+; GISEL-NEXT: mov x27, x13
+; GISEL-NEXT: orr x30, x0, x30
+; GISEL-NEXT: ldr x0, [sp, #248] ; 8-byte Folded Reload
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x0, eq
+; GISEL-NEXT: csel x30, xzr, x15, eq
+; GISEL-NEXT: ldr x15, [sp, #312] ; 8-byte Folded Reload
; GISEL-NEXT: cmp x9, #5
-; GISEL-NEXT: orr x30, x17, x30
+; GISEL-NEXT: orr x30, x15, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x14, eq
-; GISEL-NEXT: ldr x14, [sp, #216] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x30, xzr, x3, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x30, x14, x30
+; GISEL-NEXT: ldr x3, [sp, #40] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x30, x13, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x13, eq
-; GISEL-NEXT: ldr x13, [sp, #200] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x30, xzr, x14, eq
+; GISEL-NEXT: ldp x13, x14, [sp, #256] ; 16-byte Folded Reload
; GISEL-NEXT: cmp x9, #7
; GISEL-NEXT: orr x30, x13, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x15, eq
-; GISEL-NEXT: ldr x15, [sp, #32] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x30, xzr, x14, eq
; GISEL-NEXT: cmp x9, #8
-; GISEL-NEXT: orr x30, x15, x30
+; GISEL-NEXT: orr x30, x3, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x12, eq
+; GISEL-NEXT: csel x30, xzr, x0, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: orr x30, x16, x30
+; GISEL-NEXT: orr x30, x12, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x23, eq
+; GISEL-NEXT: csel x30, xzr, x19, eq
; GISEL-NEXT: cmp x9, #10
-; GISEL-NEXT: orr x30, x5, x30
+; GISEL-NEXT: orr x30, x23, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x7, eq
+; GISEL-NEXT: csel x30, xzr, x17, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: orr x30, x19, x30
+; GISEL-NEXT: orr x30, x21, x30
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x27, eq
+; GISEL-NEXT: csel x30, xzr, x20, eq
; GISEL-NEXT: cmp x9, #12
-; GISEL-NEXT: orr x30, x6, x30
+; GISEL-NEXT: mov x20, x26
+; GISEL-NEXT: orr x30, x26, x30
+; GISEL-NEXT: mov x26, x5
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x30, xzr, x25, eq
+; GISEL-NEXT: csel x30, xzr, x22, eq
; GISEL-NEXT: cmp x9, #13
-; GISEL-NEXT: orr x30, x3, x30
+; GISEL-NEXT: orr x30, x5, x30
+; GISEL-NEXT: ldr x5, [sp, #16] ; 8-byte Folded Reload
; GISEL-NEXT: csel x1, x30, x1, eq
; GISEL-NEXT: cmp x9, #14
-; GISEL-NEXT: lsr x30, x10, x4
-; GISEL-NEXT: csel x1, x26, x1, eq
+; GISEL-NEXT: csel x1, x6, x1, eq
; GISEL-NEXT: cmp x9, #15
+; GISEL-NEXT: lsr x30, x10, x5
; GISEL-NEXT: csel x1, xzr, x1, eq
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: csel x26, x10, x1, eq
-; GISEL-NEXT: lsl x10, x20, x11
+; GISEL-NEXT: csel x5, x10, x1, eq
+; GISEL-NEXT: and x10, x8, #0x3f
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x16, xzr, x30, eq
+; GISEL-NEXT: lsl x10, x4, x10
+; GISEL-NEXT: csel x1, xzr, x30, eq
; GISEL-NEXT: cmp x9, #0
-; GISEL-NEXT: ldr x11, [sp, #296] ; 8-byte Folded Reload
-; GISEL-NEXT: orr x10, x10, x16
-; GISEL-NEXT: ldr x16, [sp, #312] ; 8-byte Folded Reload
+; GISEL-NEXT: ldp x29, x30, [sp, #416] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x10, x10, x1
+; GISEL-NEXT: ldr x1, [sp, #296] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, x10, xzr, eq
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #1
-; GISEL-NEXT: orr x11, x16, x11
-; GISEL-NEXT: ldr x16, [sp, #272] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x11, x24, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x21, eq
+; GISEL-NEXT: csel x11, xzr, x28, eq
; GISEL-NEXT: cmp x9, #2
-; GISEL-NEXT: orr x11, x28, x11
-; GISEL-NEXT: ldp x29, x30, [sp, #400] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x11, x25, x11
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #16] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x11, eq
+; GISEL-NEXT: csel x11, xzr, x7, eq
; GISEL-NEXT: cmp x9, #3
-; GISEL-NEXT: orr x11, x24, x11
+; GISEL-NEXT: orr x11, x1, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x22, eq
+; GISEL-NEXT: csel x11, xzr, x16, eq
; GISEL-NEXT: cmp x9, #4
-; GISEL-NEXT: orr x11, x16, x11
-; GISEL-NEXT: ldr x16, [sp, #232] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x16, [sp, #280] ; 8-byte Folded Reload
+; GISEL-NEXT: orr x11, x2, x11
; GISEL-NEXT: csel x10, x11, x10, eq
+; GISEL-NEXT: ldr x11, [sp, #240] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x2, eq
+; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #5
; GISEL-NEXT: orr x11, x16, x11
-; GISEL-NEXT: ldp x22, x21, [sp, #368] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
+; GISEL-NEXT: ldr x11, [sp, #32] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x0, eq
+; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #6
-; GISEL-NEXT: orr x11, x17, x11
+; GISEL-NEXT: orr x11, x15, x11
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #264] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #328] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #7
-; GISEL-NEXT: orr x11, x14, x11
+; GISEL-NEXT: orr x11, x27, x11
+; GISEL-NEXT: ldp x28, x27, [sp, #336] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #256] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #320] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #8
; GISEL-NEXT: orr x11, x13, x11
-; GISEL-NEXT: ldr x13, [sp, #112] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x13, [sp, #144] ; 8-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #24] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x11, eq
+; GISEL-NEXT: csel x11, xzr, x14, eq
; GISEL-NEXT: cmp x9, #9
-; GISEL-NEXT: orr x11, x15, x11
+; GISEL-NEXT: orr x11, x3, x11
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: tst x8, #0x3f
-; GISEL-NEXT: csel x11, xzr, x12, eq
-; GISEL-NEXT: ldr x12, [sp, #304] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x11, xzr, x0, eq
; GISEL-NEXT: cmp x9, #10
; GISEL-NEXT: orr x11, x12, x11
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #192] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #232] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: str x11, [x13]
-; GISEL-NEXT: ldp x12, x11, [sp, #176] ; 16-byte Folded Reload
+; GISEL-NEXT: ldp x12, x11, [sp, #216] ; 16-byte Folded Reload
; GISEL-NEXT: stp x11, x12, [x13, #8]
-; GISEL-NEXT: csel x11, xzr, x23, eq
+; GISEL-NEXT: csel x11, xzr, x19, eq
; GISEL-NEXT: cmp x9, #11
-; GISEL-NEXT: orr x11, x5, x11
-; GISEL-NEXT: ldp x24, x23, [sp, #352] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x11, x23, x11
+; GISEL-NEXT: ldp x24, x23, [sp, #368] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #168] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #208] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: str x11, [x13, #24]
-; GISEL-NEXT: ldp x12, x11, [sp, #152] ; 16-byte Folded Reload
+; GISEL-NEXT: ldp x12, x11, [sp, #192] ; 16-byte Folded Reload
; GISEL-NEXT: stp x11, x12, [x13, #32]
-; GISEL-NEXT: csel x11, xzr, x7, eq
+; GISEL-NEXT: csel x11, xzr, x17, eq
; GISEL-NEXT: cmp x9, #12
-; GISEL-NEXT: orr x11, x19, x11
+; GISEL-NEXT: orr x11, x21, x11
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #144] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #184] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: str x11, [x13, #48]
-; GISEL-NEXT: ldp x12, x11, [sp, #128] ; 16-byte Folded Reload
+; GISEL-NEXT: ldp x12, x11, [sp, #168] ; 16-byte Folded Reload
; GISEL-NEXT: stp x11, x12, [x13, #56]
-; GISEL-NEXT: csel x11, xzr, x27, eq
+; GISEL-NEXT: ldr x11, [sp, #112] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x12, [sp, #136] ; 8-byte Folded Reload
+; GISEL-NEXT: csel x11, xzr, x11, eq
; GISEL-NEXT: cmp x9, #13
-; GISEL-NEXT: orr x11, x6, x11
-; GISEL-NEXT: ldp x28, x27, [sp, #320] ; 16-byte Folded Reload
+; GISEL-NEXT: orr x11, x20, x11
+; GISEL-NEXT: ldp x20, x19, [sp, #400] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
-; GISEL-NEXT: ldr x11, [sp, #120] ; 8-byte Folded Reload
+; GISEL-NEXT: ldr x11, [sp, #160] ; 8-byte Folded Reload
; GISEL-NEXT: tst x8, #0x3f
; GISEL-NEXT: str x11, [x13, #72]
-; GISEL-NEXT: ldp x12, x11, [sp, #96] ; 16-byte Folded Reload
-; GISEL-NEXT: stp x11, x12, [x13, #80]
-; GISEL-NEXT: csel x11, xzr, x25, eq
+; GISEL-NEXT: ldr x11, [sp, #152] ; 8-byte Folded Reload
+; GISEL-NEXT: str x11, [x13, #80]
+; GISEL-NEXT: csel x11, xzr, x22, eq
; GISEL-NEXT: cmp x9, #14
-; GISEL-NEXT: orr x11, x3, x11
+; GISEL-NEXT: orr x11, x26, x11
+; GISEL-NEXT: ldp x22, x21, [sp, #384] ; 16-byte Folded Reload
; GISEL-NEXT: csel x10, x11, x10, eq
; GISEL-NEXT: cmp x9, #15
-; GISEL-NEXT: ldr x9, [sp, #288] ; 8-byte Folded Reload
-; GISEL-NEXT: ldr x11, [sp, #88] ; 8-byte Folded Reload
-; GISEL-NEXT: csel x9, x9, x10, eq
+; GISEL-NEXT: ldr x9, [sp, #128] ; 8-byte Folded Reload
+; GISEL-NEXT: ldp x26, x25, [sp, #352] ; 16-byte Folded Reload
+; GISEL-NEXT: stp x12, x9, [x13, #88]
+; GISEL-NEXT: csel x9, x6, x10, eq
; GISEL-NEXT: cmp x8, #0
-; GISEL-NEXT: ldr x8, [sp, #80] ; 8-byte Folded Reload
-; GISEL-NEXT: stp x11, x8, [x13, #96]
-; GISEL-NEXT: csel x8, x20, x9, eq
-; GISEL-NEXT: stp x26, x8, [x13, #112]
-; GISEL-NEXT: ldp x20, x19, [sp, #384] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x26, x25, [sp, #336] ; 16-byte Folded Reload
-; GISEL-NEXT: add sp, sp, #416
+; GISEL-NEXT: ldr x8, [sp, #120] ; 8-byte Folded Reload
+; GISEL-NEXT: stp x8, x5, [x13, #104]
+; GISEL-NEXT: csel x8, x4, x9, eq
+; GISEL-NEXT: str x8, [x13, #120]
+; GISEL-NEXT: add sp, sp, #432
; GISEL-NEXT: ret
entry:
%input_val = load i1024, ptr %input, align 128
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 63c08dd..b215c51 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -267,7 +267,7 @@ define void @larger_smull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr
; CHECK-SD-NEXT: and x9, x8, #0xfffffff0
; CHECK-SD-NEXT: add x10, x2, #32
; CHECK-SD-NEXT: add x11, x0, #16
-; CHECK-SD-NEXT: mov x12, x9
+; CHECK-SD-NEXT: and x12, x8, #0xfffffff0
; CHECK-SD-NEXT: .LBB3_4: // %vector.body
; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-SD-NEXT: ldp q1, q2, [x11, #-16]
@@ -313,7 +313,7 @@ define void @larger_smull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr
; CHECK-GI-NEXT: and x10, x9, #0xfffffff0
; CHECK-GI-NEXT: add x11, x2, #32
; CHECK-GI-NEXT: add x12, x0, #16
-; CHECK-GI-NEXT: mov x13, x10
+; CHECK-GI-NEXT: and x13, x9, #0xfffffff0
; CHECK-GI-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NEXT: .LBB3_3: // %vector.body
; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
@@ -428,7 +428,7 @@ define void @larger_umull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr
; CHECK-SD-NEXT: and x9, x8, #0xfffffff0
; CHECK-SD-NEXT: add x10, x2, #32
; CHECK-SD-NEXT: add x11, x0, #16
-; CHECK-SD-NEXT: mov x12, x9
+; CHECK-SD-NEXT: and x12, x8, #0xfffffff0
; CHECK-SD-NEXT: .LBB4_4: // %vector.body
; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-SD-NEXT: ldp q1, q2, [x11, #-16]
@@ -472,7 +472,7 @@ define void @larger_umull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr
; CHECK-GI-NEXT: and x8, x9, #0xfffffff0
; CHECK-GI-NEXT: add x10, x2, #32
; CHECK-GI-NEXT: add x11, x0, #16
-; CHECK-GI-NEXT: mov x12, x8
+; CHECK-GI-NEXT: and x12, x9, #0xfffffff0
; CHECK-GI-NEXT: .LBB4_3: // %vector.body
; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-GI-NEXT: and w13, w1, #0xffff
@@ -596,7 +596,7 @@ define i16 @red_mla_dup_ext_u8_s8_s16(ptr noalias nocapture noundef readonly %A,
; CHECK-SD-NEXT: and x11, x10, #0xfffffff0
; CHECK-SD-NEXT: fmov s2, w9
; CHECK-SD-NEXT: add x8, x0, #8
-; CHECK-SD-NEXT: mov x12, x11
+; CHECK-SD-NEXT: and x12, x10, #0xfffffff0
; CHECK-SD-NEXT: .LBB5_5: // %vector.body
; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-SD-NEXT: ldp d3, d4, [x8, #-8]
@@ -646,10 +646,10 @@ define i16 @red_mla_dup_ext_u8_s8_s16(ptr noalias nocapture noundef readonly %A,
; CHECK-GI-NEXT: movi v0.2d, #0000000000000000
; CHECK-GI-NEXT: movi v1.2d, #0000000000000000
; CHECK-GI-NEXT: add x10, x0, #8
+; CHECK-GI-NEXT: and x11, x8, #0xfffffff0
; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
; CHECK-GI-NEXT: dup v2.8h, w9
; CHECK-GI-NEXT: and x9, x8, #0xfffffff0
-; CHECK-GI-NEXT: mov x11, x9
; CHECK-GI-NEXT: .LBB5_5: // %vector.body
; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-GI-NEXT: ldp d3, d4, [x10, #-8]
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
index f829227..dc35224 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
@@ -563,6 +563,41 @@ declare <8 x i16> @large_vector(<8 x i16> %0) nounwind;
; CHECK-NEXT: .seh_endfunclet
; CHECK-NEXT: .seh_endproc
+declare void @"??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"()
+; CHECK-LABEL: .def "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@";
+; CHECK-NEXT: .scl 2;
+; CHECK-NEXT: .type 32;
+; CHECK-NEXT: .endef
+; CHECK-NEXT: .section .wowthk$aa,"xr",discard,"??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: .globl "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@" // -- Begin function ??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@": // @"??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: .weak_anti_dep "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"
+; CHECK-NEXT: "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@" = "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: .weak_anti_dep "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@" = "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: .seh_proc "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg_x x30, 16
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_check_icall
+; CHECK-NEXT: adrp x11, "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"
+; CHECK-NEXT: add x11, x11, :lo12:"??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: adrp x10, $iexit_thunk$cdecl$v$v
+; CHECK-NEXT: add x10, x10, :lo12:$iexit_thunk$cdecl$v$v
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg_x x30, 16
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: br x11
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+
+
+
; CHECK-LABEL: .section .hybmp$x,"yi"
; CHECK-NEXT: .symidx "#func_caller"
; CHECK-NEXT: .symidx $ientry_thunk$cdecl$v$v
@@ -633,6 +668,12 @@ declare <8 x i16> @large_vector(<8 x i16> %0) nounwind;
; CHECK-NEXT: .symidx "#large_vector$exit_thunk"
; CHECK-NEXT: .symidx large_vector
; CHECK-NEXT: .word 0
+; CHECK-NEXT: .symidx "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"
+; CHECK-NEXT: .symidx $iexit_thunk$cdecl$v$v
+; CHECK-NEXT: .word 4
+; CHECK-NEXT: .symidx "??$exit_thunk@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@$$h@"
+; CHECK-NEXT: .symidx "??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"
+; CHECK-NEXT: .word 0
define void @func_caller() nounwind {
call void @no_op()
@@ -649,5 +690,6 @@ define void @func_caller() nounwind {
call %T2 @simple_struct(%T1 { i16 0 }, %T2 { i32 0, float 0.0 }, %T3 { i64 0, double 0.0 }, %T4 { i64 0, double 0.0, i8 0 })
call <4 x i8> @small_vector(<4 x i8> <i8 0, i8 0, i8 0, i8 0>)
call <8 x i16> @large_vector(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+ call void @"??@md5mangleaaaaaaaaaaaaaaaaaaaaaaa@"()
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll b/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
index 4c8e589..c23e4e1 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
@@ -17,7 +17,7 @@ define void @fma_dup_f16(ptr noalias nocapture noundef readonly %A, half noundef
; CHECK-NEXT: and x9, x8, #0xfffffff0
; CHECK-NEXT: add x10, x1, #16
; CHECK-NEXT: add x11, x0, #16
-; CHECK-NEXT: mov x12, x9
+; CHECK-NEXT: and x12, x8, #0xfffffff0
; CHECK-NEXT: .LBB0_4: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldp q1, q4, [x10, #-16]
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sub-loop.ll b/llvm/test/CodeGen/AArch64/machine-licm-sub-loop.ll
index f6bbdf5..1770bb9 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sub-loop.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sub-loop.ll
@@ -14,7 +14,6 @@ define void @foo(i32 noundef %limit, ptr %out, ptr %y) {
; CHECK-NEXT: mov x9, xzr
; CHECK-NEXT: and x12, x10, #0xfffffff0
; CHECK-NEXT: add x13, x1, #32
-; CHECK-NEXT: add x14, x2, #16
; CHECK-NEXT: b .LBB0_3
; CHECK-NEXT: .LBB0_2: // %for.cond1.for.cond.cleanup3_crit_edge.us
; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
@@ -27,52 +26,52 @@ define void @foo(i32 noundef %limit, ptr %out, ptr %y) {
; CHECK-NEXT: // =>This Loop Header: Depth=1
; CHECK-NEXT: // Child Loop BB0_6 Depth 2
; CHECK-NEXT: // Child Loop BB0_9 Depth 2
-; CHECK-NEXT: ldrsh w15, [x2, x9, lsl #1]
+; CHECK-NEXT: ldrsh w14, [x2, x9, lsl #1]
; CHECK-NEXT: cmp w0, #16
; CHECK-NEXT: b.hs .LBB0_5
; CHECK-NEXT: // %bb.4: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: mov x18, xzr
+; CHECK-NEXT: mov x17, xzr
; CHECK-NEXT: b .LBB0_8
; CHECK-NEXT: .LBB0_5: // %vector.ph
; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: dup v0.8h, w15
-; CHECK-NEXT: mov x16, x14
-; CHECK-NEXT: mov x17, x13
-; CHECK-NEXT: mov x18, x12
+; CHECK-NEXT: dup v0.8h, w14
+; CHECK-NEXT: add x15, x2, #16
+; CHECK-NEXT: mov x16, x13
+; CHECK-NEXT: and x17, x10, #0xfffffff0
; CHECK-NEXT: .LBB0_6: // %vector.body
; CHECK-NEXT: // Parent Loop BB0_3 Depth=1
; CHECK-NEXT: // => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldp q1, q4, [x16, #-16]
-; CHECK-NEXT: subs x18, x18, #16
-; CHECK-NEXT: ldp q3, q2, [x17, #-32]
-; CHECK-NEXT: add x16, x16, #32
-; CHECK-NEXT: ldp q6, q5, [x17]
+; CHECK-NEXT: ldp q1, q4, [x15, #-16]
+; CHECK-NEXT: subs x17, x17, #16
+; CHECK-NEXT: ldp q3, q2, [x16, #-32]
+; CHECK-NEXT: add x15, x15, #32
+; CHECK-NEXT: ldp q6, q5, [x16]
; CHECK-NEXT: smlal2 v2.4s, v0.8h, v1.8h
; CHECK-NEXT: smlal v3.4s, v0.4h, v1.4h
; CHECK-NEXT: smlal2 v5.4s, v0.8h, v4.8h
; CHECK-NEXT: smlal v6.4s, v0.4h, v4.4h
-; CHECK-NEXT: stp q3, q2, [x17, #-32]
-; CHECK-NEXT: stp q6, q5, [x17], #64
+; CHECK-NEXT: stp q3, q2, [x16, #-32]
+; CHECK-NEXT: stp q6, q5, [x16], #64
; CHECK-NEXT: b.ne .LBB0_6
; CHECK-NEXT: // %bb.7: // %middle.block
; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
; CHECK-NEXT: cmp x12, x10
-; CHECK-NEXT: mov x18, x12
+; CHECK-NEXT: and x17, x10, #0xfffffff0
; CHECK-NEXT: b.eq .LBB0_2
; CHECK-NEXT: .LBB0_8: // %for.body4.us.preheader
; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: add x16, x18, x8
-; CHECK-NEXT: add x17, x2, x18, lsl #1
-; CHECK-NEXT: sub x18, x10, x18
-; CHECK-NEXT: add x16, x1, x16, lsl #2
+; CHECK-NEXT: add x15, x17, x8
+; CHECK-NEXT: add x16, x2, x17, lsl #1
+; CHECK-NEXT: sub x17, x10, x17
+; CHECK-NEXT: add x15, x1, x15, lsl #2
; CHECK-NEXT: .LBB0_9: // %for.body4.us
; CHECK-NEXT: // Parent Loop BB0_3 Depth=1
; CHECK-NEXT: // => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldrsh w3, [x17], #2
-; CHECK-NEXT: ldr w4, [x16]
-; CHECK-NEXT: subs x18, x18, #1
-; CHECK-NEXT: madd w3, w3, w15, w4
-; CHECK-NEXT: str w3, [x16], #4
+; CHECK-NEXT: ldrsh w18, [x16], #2
+; CHECK-NEXT: ldr w3, [x15]
+; CHECK-NEXT: subs x17, x17, #1
+; CHECK-NEXT: madd w18, w18, w14, w3
+; CHECK-NEXT: str w18, [x15], #4
; CHECK-NEXT: b.ne .LBB0_9
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index 3caac1d..74b0e69 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -278,9 +278,9 @@ define i64 @test_and_4(i64 %x, i64 %y) {
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w30, -32
-; CHECK-GI-NEXT: and x20, x0, #0x3
; CHECK-GI-NEXT: mov x19, x0
-; CHECK-GI-NEXT: mov x0, x20
+; CHECK-GI-NEXT: and x20, x0, #0x3
+; CHECK-GI-NEXT: and x0, x0, #0x3
; CHECK-GI-NEXT: bl callee
; CHECK-GI-NEXT: tst x19, #0x3
; CHECK-GI-NEXT: csel x0, x20, x0, eq
diff --git a/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll b/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll
index e0f2155..58c01db 100644
--- a/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll
+++ b/llvm/test/CodeGen/AArch64/reserveXreg-for-regalloc.ll
@@ -7,20 +7,16 @@
define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
; CHECK-LABEL: foo:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: add x3, x0, x1
-; CHECK-NEXT: str x3, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: str x3, [x2, #8]
; CHECK-NEXT: ldr x3, [x2, #16]
; CHECK-NEXT: add x3, x0, x3
; CHECK-NEXT: sub x3, x3, x1
; CHECK-NEXT: str x3, [x2, #16]
-; CHECK-NEXT: ldr x3, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT: add x3, x0, x1
; CHECK-NEXT: str x3, [x2, #24]
; CHECK-NEXT: str x0, [x2, #32]
; CHECK-NEXT: str x1, [x2, #40]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%v3 = add i64 %v1, %v2
%p1 = getelementptr i64, ptr %ptr, i64 1
diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
deleted file mode 100644
index 0298168..0000000
--- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
+++ /dev/null
@@ -1,1009 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -run-pass=greedy %s -o - | FileCheck %s
-# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -start-before=greedy -stop-after=aarch64-expand-pseudo -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=EXPAND
---- |
- source_filename = "<stdin>"
- target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
- target triple = "aarch64--linux-gnu"
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__spill_zpr() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill_above_p7() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill_p4_saved() #0 { entry: unreachable }
-
- attributes #0 = {nounwind "target-features"="+sme,+sve" "aarch64_pstate_sm_compatible"}
-...
----
-name: zpr_predicate_spill
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
-body: |
- bb.0.entry:
- liveins: $p0
-
- ; CHECK-LABEL: name: zpr_predicate_spill
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0
-
- ; EXPAND-LABEL: name: zpr_predicate_spill
- ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- RET_ReallyLR implicit $p0
-...
----
-name: zpr_predicate_spill__save_restore_nzcv
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
-body: |
- bb.0.entry:
- liveins: $p0
-
- ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $nzcv
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv
- ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $nzcv
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0
- $nzcv = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $nzcv
-
- RET_ReallyLR implicit $p0
-...
----
-name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$x0' }
- - { reg: '$x1' }
- - { reg: '$x2' }
- - { reg: '$x3' }
- - { reg: '$x4' }
- - { reg: '$x5' }
- - { reg: '$x6' }
- - { reg: '$x7' }
-body: |
- bb.0.entry:
- liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
-
- ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $x8 = IMPLICIT_DEF
- ; CHECK-NEXT: $x9 = IMPLICIT_DEF
- ; CHECK-NEXT: $x10 = IMPLICIT_DEF
- ; CHECK-NEXT: $x11 = IMPLICIT_DEF
- ; CHECK-NEXT: $x12 = IMPLICIT_DEF
- ; CHECK-NEXT: $x13 = IMPLICIT_DEF
- ; CHECK-NEXT: $x14 = IMPLICIT_DEF
- ; CHECK-NEXT: $x15 = IMPLICIT_DEF
- ; CHECK-NEXT: $x16 = IMPLICIT_DEF
- ; CHECK-NEXT: $x17 = IMPLICIT_DEF
- ; CHECK-NEXT: $x18 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
- ; EXPAND: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $x8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x15 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x16 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x17 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x18 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $fp = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $fp, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $fp, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- $nzcv = IMPLICIT_DEF
- $x8 = IMPLICIT_DEF
- $x9 = IMPLICIT_DEF
- $x10 = IMPLICIT_DEF
- $x11 = IMPLICIT_DEF
- $x12 = IMPLICIT_DEF
- $x13 = IMPLICIT_DEF
- $x14 = IMPLICIT_DEF
- $x15 = IMPLICIT_DEF
- $x16 = IMPLICIT_DEF
- $x17 = IMPLICIT_DEF
- $x18 = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-
- RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-...
----
-name: zpr_predicate_spill__spill_zpr
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$z0' }
- - { reg: '$z1' }
- - { reg: '$z2' }
- - { reg: '$z3' }
- - { reg: '$z4' }
- - { reg: '$z5' }
- - { reg: '$z6' }
- - { reg: '$z7' }
-body: |
- bb.0.entry:
- liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
-
- ; CHECK-LABEL: name: zpr_predicate_spill__spill_zpr
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $z16 = IMPLICIT_DEF
- ; CHECK-NEXT: $z17 = IMPLICIT_DEF
- ; CHECK-NEXT: $z18 = IMPLICIT_DEF
- ; CHECK-NEXT: $z19 = IMPLICIT_DEF
- ; CHECK-NEXT: $z20 = IMPLICIT_DEF
- ; CHECK-NEXT: $z21 = IMPLICIT_DEF
- ; CHECK-NEXT: $z22 = IMPLICIT_DEF
- ; CHECK-NEXT: $z23 = IMPLICIT_DEF
- ; CHECK-NEXT: $z24 = IMPLICIT_DEF
- ; CHECK-NEXT: $z25 = IMPLICIT_DEF
- ; CHECK-NEXT: $z26 = IMPLICIT_DEF
- ; CHECK-NEXT: $z27 = IMPLICIT_DEF
- ; CHECK-NEXT: $z28 = IMPLICIT_DEF
- ; CHECK-NEXT: $z29 = IMPLICIT_DEF
- ; CHECK-NEXT: $z30 = IMPLICIT_DEF
- ; CHECK-NEXT: $z31 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__spill_zpr
- ; EXPAND: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4, $z23, $z22, $z21, $z20, $z19, $z18, $z17, $z16
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.22)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -20, implicit $vg
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 0 :: (store (s128) into %stack.21)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 1 :: (store (s128) into %stack.20)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 2 :: (store (s128) into %stack.19)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 3 :: (store (s128) into %stack.18)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 4 :: (store (s128) into %stack.17)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 5 :: (store (s128) into %stack.16)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 6 :: (store (s128) into %stack.15)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 7 :: (store (s128) into %stack.14)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 8 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 9 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 10 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 11 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z23, $sp, 12 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z22, $sp, 13 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z21, $sp, 14 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z20, $sp, 15 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z19, $sp, 16 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z18, $sp, 17 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z17, $sp, 18 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z16, $sp, 19 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ;
- ; EXPAND-NEXT: $z16 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z17 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z18 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z19 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z20 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z21 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z22 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z23 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z24 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z25 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z26 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z27 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z28 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z29 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z30 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z31 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24)
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 0 :: (load (s128) from %stack.24)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.24)
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 12 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 13 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $z21 = frame-destroy LDR_ZXI $sp, 14 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $z20 = frame-destroy LDR_ZXI $sp, 15 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $z19 = frame-destroy LDR_ZXI $sp, 16 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $z18 = frame-destroy LDR_ZXI $sp, 17 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $z17 = frame-destroy LDR_ZXI $sp, 18 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $z16 = frame-destroy LDR_ZXI $sp, 19 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.21)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.20)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.19)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.18)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.17)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.16)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.15)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.14)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 20, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.22)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
- $z16 = IMPLICIT_DEF
- $z17 = IMPLICIT_DEF
- $z18 = IMPLICIT_DEF
- $z19 = IMPLICIT_DEF
- $z20 = IMPLICIT_DEF
- $z21 = IMPLICIT_DEF
- $z22 = IMPLICIT_DEF
- $z23 = IMPLICIT_DEF
- $z24 = IMPLICIT_DEF
- $z25 = IMPLICIT_DEF
- $z26 = IMPLICIT_DEF
- $z27 = IMPLICIT_DEF
- $z28 = IMPLICIT_DEF
- $z29 = IMPLICIT_DEF
- $z30 = IMPLICIT_DEF
- $z31 = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
-
- RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
-...
----
-name: zpr_predicate_spill_above_p7
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$p1' }
- - { reg: '$p2' }
- - { reg: '$p3' }
-body: |
- bb.0.entry:
- liveins: $p0, $p1, $p2, $p3
-
- ; CHECK-LABEL: name: zpr_predicate_spill_above_p7
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $p1, $p2, $p3
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p15, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p15 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
- ; EXPAND-LABEL: name: zpr_predicate_spill_above_p7
- ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ;
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p15, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.16)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.16)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
- $p15 = IMPLICIT_DEF
- %1:ppr = COPY $p15
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p15 = COPY %1
-
- FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
-
- RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-...
----
-name: zpr_predicate_spill_p4_saved
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$p1' }
- - { reg: '$p2' }
- - { reg: '$p3' }
-body: |
- bb.0.entry:
- liveins: $p0, $p1, $p2, $p3
-
- ; CHECK-LABEL: name: zpr_predicate_spill_p4_saved
- ; CHECK: liveins: $p0, $p1, $p2, $p3
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
- ; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved
- ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4
- ; EXPAND-NEXT: {{ $}}
- ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1)
- ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
- ; If we spill a register above p8, p4 must also be saved, so we can guarantee
- ; there will be a register (in the range p0-p7) free for the cmpne reload.
- $p8 = IMPLICIT_DEF
-
- RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-...
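(Context for the comment in the deleted block above: a predicate spilled through a ZPR slot is reloaded with a PTRUE + CMPNE pair, and SVE compare instructions encode their governing predicate in a 3-bit field, so the scratch predicate must be one of p0-p7. A minimal assembly sketch of the spill/reload pattern, with illustrative register numbers rather than ones taken from the test:

    mov    z0.b, p8/z, #1           // spill: expand predicate p8 into a vector
    str    z0, [sp]                 // store to the ZPR-sized spill slot
    // ...
    ldr    z0, [sp]                 // reload the vector
    ptrue  p4.b                     // scratch predicate; must be in p0-p7
    cmpne  p8.b, p4/z, z0.b, #0     // recover p8: active lanes are non-zero

This is why spilling any predicate above p8 forces p4 into the callee-saved set.)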
diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
index 01e3d3a..c0a2943 100644
--- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
@@ -1,7 +1,5 @@
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-PADDING
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS-WITH-PADDING
; Don't emit remarks for non-streaming functions.
define float @csr_x20_stackargs_notsc(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) {
@@ -69,16 +67,11 @@ entry:
; SVE calling conventions
; Padding is placed between predicate and fpr/zpr register spills, so only emit remarks when hazard padding is off.
-; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon).
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call':
-; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at {{.*}} is too close to GPR stack object
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
@@ -89,10 +82,6 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call':
-; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at {{.*}} is too close to GPR stack object
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%0 = alloca [37 x i8], align 16
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 5fc996a..0f62997 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -23,7 +23,7 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: add x13, x1, #16
; CHECK-NEXT: add x8, x1, x10, lsl #2
; CHECK-NEXT: add x9, x0, x10
-; CHECK-NEXT: mov x14, x10
+; CHECK-NEXT: and x14, x11, #0x1fffffff8
; CHECK-NEXT: .LBB0_4: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldp q1, q2, [x13, #-16]
@@ -194,9 +194,9 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: mov w8, #1132396544 // =0x437f0000
; CHECK-NEXT: and x10, x11, #0x1fffffffc
; CHECK-NEXT: dup v0.4s, w8
+; CHECK-NEXT: and x12, x11, #0x1fffffffc
; CHECK-NEXT: add x8, x1, x10, lsl #3
; CHECK-NEXT: add x9, x0, x10, lsl #1
-; CHECK-NEXT: mov x12, x10
; CHECK-NEXT: .LBB1_9: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld2 { v1.4s, v2.4s }, [x1], #32
@@ -341,7 +341,7 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: dup v0.4s, w8
; CHECK-NEXT: ldr q1, [x12, :lo12:.LCPI2_0]
; CHECK-NEXT: add x9, x10, x10, lsl #1
-; CHECK-NEXT: mov x12, x10
+; CHECK-NEXT: and x12, x11, #0x1fffffffc
; CHECK-NEXT: add x8, x1, x9, lsl #2
; CHECK-NEXT: add x9, x0, x9
; CHECK-NEXT: .LBB2_4: // %vector.body
@@ -597,7 +597,7 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: ldr q1, [x12, :lo12:.LCPI3_0]
; CHECK-NEXT: add x8, x1, x10, lsl #4
; CHECK-NEXT: add x9, x0, x10, lsl #2
-; CHECK-NEXT: mov x12, x10
+; CHECK-NEXT: and x12, x11, #0x1fffffffc
; CHECK-NEXT: .LBB3_9: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld4 { v2.4s, v3.4s, v4.4s, v5.4s }, [x1], #64
diff --git a/llvm/test/CodeGen/AArch64/trampoline.ll b/llvm/test/CodeGen/AArch64/trampoline.ll
index 0e68270..3e933fa 100644
--- a/llvm/test/CodeGen/AArch64/trampoline.ll
+++ b/llvm/test/CodeGen/AArch64/trampoline.ll
@@ -263,3 +263,9 @@ define i64 @func2() {
%fp = call ptr @llvm.adjust.trampoline(ptr @trampg)
ret i64 0
}
+
+; Check for the explicitly emitted .note.GNU-stack section (ELF only) in the
+; presence of trampolines.
+; UTC_ARGS: --disable
+; CHECK-LINUX: .section ".note.GNU-stack","x",@progbits
+; UTC_ARGS: --enable
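(Background on the new check: lowering llvm.init.trampoline writes executable code into a caller-provided buffer, typically on the stack, so an ELF object containing trampolines must advertise an executable stack; that is what the "x" flag on .note.GNU-stack encodes. A hedged IR sketch of the pattern that triggers this, with illustrative names and buffer size:

    declare void @llvm.init.trampoline(ptr, ptr, ptr)
    declare ptr @llvm.adjust.trampoline(ptr)

    define i64 @use_closure(ptr %env) {
      %tramp = alloca [64 x i8], align 8   ; buffer size is target-dependent
      call void @llvm.init.trampoline(ptr %tramp, ptr @callee, ptr %env)
      %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
      %r = call i64 %fp()                  ; invoke through the trampoline
      ret i64 %r
    }

    define i64 @callee(ptr nest %env) {    ; %env arrives via the 'nest' argument
      ret i64 0
    }
)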
diff --git a/llvm/test/CodeGen/AMDGPU/.#llvm.amdgcn.smfmac.gfx950.ll b/llvm/test/CodeGen/AMDGPU/.#llvm.amdgcn.smfmac.gfx950.ll
deleted file mode 120000
index 8747bd5..0000000
--- a/llvm/test/CodeGen/AMDGPU/.#llvm.amdgcn.smfmac.gfx950.ll
+++ /dev/null
@@ -1 +0,0 @@
-matt@mattbookAMD.56897 \ No newline at end of file
diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index 9e24023..ebbeab9 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -146,9 +146,9 @@ define void @no_free_vgprs_at_agpr_to_agpr_copy(float %v0, float %v1) #0 {
; GFX908-NEXT: ;;#ASMSTART
; GFX908-NEXT: ; copy
; GFX908-NEXT: ;;#ASMEND
-; GFX908-NEXT: v_accvgpr_read_b32 v32, a2
+; GFX908-NEXT: v_accvgpr_read_b32 v39, a2
; GFX908-NEXT: s_nop 1
-; GFX908-NEXT: v_accvgpr_write_b32 a3, v32
+; GFX908-NEXT: v_accvgpr_write_b32 a3, v39
; GFX908-NEXT: ;;#ASMSTART
; GFX908-NEXT: ; use a3 v[0:31]
; GFX908-NEXT: ;;#ASMEND
@@ -437,9 +437,9 @@ define void @v32_asm_def_use(float %v0, float %v1) #4 {
; GFX908-NEXT: ; copy
; GFX908-NEXT: ;;#ASMEND
; GFX908-NEXT: s_nop 7
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a2
+; GFX908-NEXT: v_accvgpr_read_b32 v35, a2
; GFX908-NEXT: s_nop 1
-; GFX908-NEXT: v_accvgpr_write_b32 a3, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a3, v35
; GFX908-NEXT: ;;#ASMSTART
; GFX908-NEXT: ; use a3 v[0:31]
; GFX908-NEXT: ;;#ASMEND
@@ -1045,9 +1045,9 @@ define void @no_free_vgprs_at_sgpr_to_agpr_copy(float %v0, float %v1) #0 {
; GFX908-NEXT: ;;#ASMSTART
; GFX908-NEXT: ; copy
; GFX908-NEXT: ;;#ASMEND
-; GFX908-NEXT: v_accvgpr_read_b32 v32, a2
+; GFX908-NEXT: v_accvgpr_read_b32 v39, a2
; GFX908-NEXT: s_nop 1
-; GFX908-NEXT: v_accvgpr_write_b32 a3, v32
+; GFX908-NEXT: v_accvgpr_write_b32 a3, v39
; GFX908-NEXT: ;;#ASMSTART
; GFX908-NEXT: ; use a3 v[0:31]
; GFX908-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-propagation.mir b/llvm/test/CodeGen/AMDGPU/agpr-copy-propagation.mir
index a42cf43..7e82382d 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-propagation.mir
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-propagation.mir
@@ -40,8 +40,8 @@ body: |
; GFX908: liveins: $agpr0
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: renamable $vgpr0 = COPY renamable $agpr0, implicit $exec
- ; GFX908-NEXT: renamable $agpr1 = COPY renamable $vgpr0, implicit $exec
- ; GFX908-NEXT: renamable $agpr2 = COPY renamable $vgpr0, implicit $exec
+ ; GFX908-NEXT: renamable $agpr1 = COPY $agpr0, implicit $exec
+ ; GFX908-NEXT: renamable $agpr2 = COPY $agpr0, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $vgpr0, implicit $agpr1, implicit $agpr2
;
; GFX90A-LABEL: name: do_not_propagate_agpr_to_agpr
diff --git a/llvm/test/CodeGen/AMDGPU/elf-header-flags-sramecc.ll b/llvm/test/CodeGen/AMDGPU/elf-header-flags-sramecc.ll
index c4479b3..e3bc516 100644
--- a/llvm/test/CodeGen/AMDGPU/elf-header-flags-sramecc.ll
+++ b/llvm/test/CodeGen/AMDGPU/elf-header-flags-sramecc.ll
@@ -15,6 +15,9 @@
; RUN: llc -filetype=obj -mtriple=amdgcn -mcpu=gfx950 < %s | llvm-readobj --file-header - | FileCheck --check-prefix=SRAM-ECC-GFX950 %s
; RUN: llc -filetype=obj -mtriple=amdgcn -mcpu=gfx950 -mattr=+sramecc < %s | llvm-readobj --file-header - | FileCheck --check-prefix=SRAM-ECC-GFX950 %s
+; RUN: llc -filetype=obj -mtriple=amdgcn -mcpu=gfx1250 < %s | llvm-readobj --file-header - | FileCheck --check-prefix=SRAM-ECC-GFX1250 %s
+; RUN: llc -filetype=obj -mtriple=amdgcn -mcpu=gfx1250 -mattr=+sramecc < %s | llvm-readobj --file-header - | FileCheck --check-prefix=SRAM-ECC-GFX1250 %s
+
; NO-SRAM-ECC-GFX906: Flags [
; NO-SRAM-ECC-GFX906-NEXT: EF_AMDGPU_FEATURE_XNACK_V3 (0x100)
; NO-SRAM-ECC-GFX906-NEXT: EF_AMDGPU_MACH_AMDGCN_GFX906 (0x2F)
@@ -52,6 +55,11 @@
; SRAM-ECC-GFX950: EF_AMDGPU_MACH_AMDGCN_GFX950 (0x4F)
; SRAM-ECC-GFX950: ]
+; SRAM-ECC-GFX1250: Flags [
+; SRAM-ECC-GFX1250: EF_AMDGPU_FEATURE_SRAMECC_V3 (0x200)
+; SRAM-ECC-GFX1250: EF_AMDGPU_MACH_AMDGCN_GFX1250 (0x49)
+; SRAM-ECC-GFX1250: ]
+
define amdgpu_kernel void @elf_header() {
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/mfma-no-register-aliasing.ll b/llvm/test/CodeGen/AMDGPU/mfma-no-register-aliasing.ll
index 51cd564..f46116e 100644
--- a/llvm/test/CodeGen/AMDGPU/mfma-no-register-aliasing.ll
+++ b/llvm/test/CodeGen/AMDGPU/mfma-no-register-aliasing.ll
@@ -95,66 +95,66 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32(ptr addrspace(1) %arg) #0 {
; GREEDY908-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v3, v0, a[0:31]
; GREEDY908-NEXT: s_nop 15
; GREEDY908-NEXT: s_nop 1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a32
-; GREEDY908-NEXT: v_accvgpr_read_b32 v5, a61
-; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a60
-; GREEDY908-NEXT: v_accvgpr_write_b32 a2, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a33
-; GREEDY908-NEXT: v_accvgpr_read_b32 v7, a59
-; GREEDY908-NEXT: v_accvgpr_read_b32 v8, a58
-; GREEDY908-NEXT: v_accvgpr_write_b32 a3, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a32
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a33
; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a34
-; GREEDY908-NEXT: v_accvgpr_read_b32 v9, a57
-; GREEDY908-NEXT: v_accvgpr_read_b32 v10, a56
+; GREEDY908-NEXT: v_accvgpr_write_b32 a2, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a3, v6
; GREEDY908-NEXT: v_accvgpr_write_b32 a4, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a35
-; GREEDY908-NEXT: v_accvgpr_read_b32 v11, a55
-; GREEDY908-NEXT: v_accvgpr_read_b32 v12, a54
-; GREEDY908-NEXT: v_accvgpr_write_b32 a5, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a36
-; GREEDY908-NEXT: v_accvgpr_read_b32 v13, a53
-; GREEDY908-NEXT: v_accvgpr_read_b32 v14, a52
-; GREEDY908-NEXT: v_accvgpr_write_b32 a6, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a35
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a36
; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a37
-; GREEDY908-NEXT: v_accvgpr_read_b32 v15, a51
-; GREEDY908-NEXT: v_accvgpr_read_b32 v16, a50
+; GREEDY908-NEXT: v_accvgpr_write_b32 a5, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a6, v6
; GREEDY908-NEXT: v_accvgpr_write_b32 a7, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a38
-; GREEDY908-NEXT: v_accvgpr_read_b32 v17, a49
-; GREEDY908-NEXT: v_accvgpr_read_b32 v18, a48
-; GREEDY908-NEXT: v_accvgpr_write_b32 a8, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a39
-; GREEDY908-NEXT: v_accvgpr_read_b32 v19, a47
-; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a46
-; GREEDY908-NEXT: v_accvgpr_write_b32 a9, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a38
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a39
; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a40
-; GREEDY908-NEXT: v_accvgpr_write_b32 a16, v2
-; GREEDY908-NEXT: v_accvgpr_write_b32 a17, v19
+; GREEDY908-NEXT: v_accvgpr_write_b32 a8, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a9, v6
; GREEDY908-NEXT: v_accvgpr_write_b32 a10, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a41
-; GREEDY908-NEXT: v_accvgpr_write_b32 a18, v18
-; GREEDY908-NEXT: v_accvgpr_write_b32 a19, v17
-; GREEDY908-NEXT: v_accvgpr_write_b32 a11, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a42
-; GREEDY908-NEXT: v_accvgpr_write_b32 a20, v16
-; GREEDY908-NEXT: v_accvgpr_write_b32 a21, v15
-; GREEDY908-NEXT: v_accvgpr_write_b32 a12, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a41
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a42
; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a43
-; GREEDY908-NEXT: v_accvgpr_write_b32 a22, v14
-; GREEDY908-NEXT: v_accvgpr_write_b32 a23, v13
+; GREEDY908-NEXT: v_accvgpr_write_b32 a11, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a12, v6
; GREEDY908-NEXT: v_accvgpr_write_b32 a13, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a44
-; GREEDY908-NEXT: v_accvgpr_write_b32 a24, v12
-; GREEDY908-NEXT: v_accvgpr_write_b32 a25, v11
-; GREEDY908-NEXT: v_accvgpr_write_b32 a14, v1
-; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a45
-; GREEDY908-NEXT: v_accvgpr_write_b32 a26, v10
-; GREEDY908-NEXT: v_accvgpr_write_b32 a27, v9
-; GREEDY908-NEXT: v_accvgpr_write_b32 a15, v1
-; GREEDY908-NEXT: v_accvgpr_write_b32 a28, v8
-; GREEDY908-NEXT: v_accvgpr_write_b32 a29, v7
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a44
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a45
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a46
+; GREEDY908-NEXT: v_accvgpr_write_b32 a14, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a15, v6
+; GREEDY908-NEXT: v_accvgpr_write_b32 a16, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a47
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a48
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a49
+; GREEDY908-NEXT: v_accvgpr_write_b32 a17, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a18, v6
+; GREEDY908-NEXT: v_accvgpr_write_b32 a19, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a50
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a51
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a52
+; GREEDY908-NEXT: v_accvgpr_write_b32 a20, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a21, v6
+; GREEDY908-NEXT: v_accvgpr_write_b32 a22, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a53
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a54
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a55
+; GREEDY908-NEXT: v_accvgpr_write_b32 a23, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a24, v6
+; GREEDY908-NEXT: v_accvgpr_write_b32 a25, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a56
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a57
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a58
+; GREEDY908-NEXT: v_accvgpr_write_b32 a26, v2
+; GREEDY908-NEXT: v_accvgpr_write_b32 a27, v6
+; GREEDY908-NEXT: v_accvgpr_write_b32 a28, v1
+; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a59
+; GREEDY908-NEXT: v_accvgpr_read_b32 v6, a60
+; GREEDY908-NEXT: v_accvgpr_read_b32 v1, a61
+; GREEDY908-NEXT: v_accvgpr_write_b32 a29, v2
; GREEDY908-NEXT: v_accvgpr_write_b32 a30, v6
-; GREEDY908-NEXT: v_accvgpr_write_b32 a31, v5
+; GREEDY908-NEXT: v_accvgpr_write_b32 a31, v1
; GREEDY908-NEXT: s_nop 0
; GREEDY908-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v3, v0, a[0:31]
; GREEDY908-NEXT: s_nop 15
@@ -667,11 +667,11 @@ define amdgpu_kernel void @test_mfma_f32_16x16x1f32(ptr addrspace(1) %arg) #0 {
; GREEDY908-NEXT: v_mfma_f32_16x16x1f32 a[18:33], v0, v1, a[18:33]
; GREEDY908-NEXT: v_mfma_f32_16x16x1f32 a[2:17], v0, v1, a[18:33]
; GREEDY908-NEXT: s_nop 8
+; GREEDY908-NEXT: v_accvgpr_read_b32 v5, a18
; GREEDY908-NEXT: v_accvgpr_read_b32 v2, a19
-; GREEDY908-NEXT: v_accvgpr_read_b32 v3, a18
; GREEDY908-NEXT: s_nop 0
+; GREEDY908-NEXT: v_accvgpr_write_b32 a0, v5
; GREEDY908-NEXT: v_accvgpr_write_b32 a1, v2
-; GREEDY908-NEXT: v_accvgpr_write_b32 a0, v3
; GREEDY908-NEXT: s_nop 0
; GREEDY908-NEXT: v_mfma_f32_16x16x1f32 a[0:15], v0, v1, a[0:15]
; GREEDY908-NEXT: s_nop 9
diff --git a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
index cf244f0..be1788c 100644
--- a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
@@ -54,19 +54,20 @@ define amdgpu_kernel void @matmul_kernel(i32 %a0, i32 %a1) {
; GFX908-NEXT: s_branch .LBB0_2
; GFX908-NEXT: .LBB0_1: ; %bb2
; GFX908-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; GFX908-NEXT: s_nop 6
+; GFX908-NEXT: v_accvgpr_read_b32 v3, a2
; GFX908-NEXT: s_or_b32 s4, s3, 1
; GFX908-NEXT: s_ashr_i32 s5, s3, 31
; GFX908-NEXT: s_mov_b32 s3, s2
; GFX908-NEXT: v_mov_b32_e32 v1, s2
-; GFX908-NEXT: s_nop 2
-; GFX908-NEXT: v_accvgpr_read_b32 v0, a2
; GFX908-NEXT: v_mov_b32_e32 v2, s3
+; GFX908-NEXT: v_accvgpr_write_b32 a0, v3
; GFX908-NEXT: v_accvgpr_read_b32 v4, a1
; GFX908-NEXT: v_accvgpr_read_b32 v3, a1
-; GFX908-NEXT: v_accvgpr_write_b32 a0, v0
+; GFX908-NEXT: s_and_b32 s3, s5, s4
; GFX908-NEXT: v_accvgpr_write_b32 a2, v4
; GFX908-NEXT: v_accvgpr_write_b32 a3, v3
-; GFX908-NEXT: s_and_b32 s3, s5, s4
+; GFX908-NEXT: s_nop 0
; GFX908-NEXT: v_mfma_f32_16x16x16f16 a[2:5], v[1:2], v[1:2], a[0:3]
; GFX908-NEXT: s_cbranch_execz .LBB0_4
; GFX908-NEXT: .LBB0_2: ; %bb
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
index 6b7d704..ede470b 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
@@ -1,13 +1,11 @@
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 < %s | FileCheck --check-prefixes=CHECK,GFX11 %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck --check-prefixes=CHECK,GFX12 %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr < %s | FileCheck --check-prefixes=CHECK,GFX12,DVGPR %s
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
index 5c0c366..5325499 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
@@ -1,17 +1,14 @@
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
; CHECK-LABEL: {{^}}_amdgpu_cs_main:
-; NODVGPR: ; TotalNumSgprs: 4
-; DVGPR: ; TotalNumSgprs: 34
+; CHECK: ; TotalNumSgprs: 4
; CHECK: ; NumVgprs: 2
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
@@ -57,7 +54,6 @@
; CHECK-NEXT: .cs:
; CHECK-NEXT: .checksum_value: 0x9444d7d0
; CHECK-NEXT: .debug_mode: false
-; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70
; CHECK-NEXT: .entry_point: _amdgpu_cs_main
; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
; CHECK-NEXT: .excp_en: 0
@@ -69,8 +65,7 @@
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
; CHECK-NEXT: .scratch_memory_size: 0
-; NODVGPR-NEXT: .sgpr_count: 0x4
-; DVGPR-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_count: 0x4
; CHECK-NEXT: .sgpr_limit: 0x6a
; CHECK-NEXT: .threadgroup_dimensions:
; CHECK-NEXT: - 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll
new file mode 100644
index 0000000..e598b0c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll
@@ -0,0 +1,204 @@
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
+
+; CHECK-LABEL: {{^}}_amdgpu_cs_main:
+; CHECK: ; TotalNumSgprs: 34
+; CHECK: ; NumVgprs: 2
+; CHECK: .amdgpu_pal_metadata
+; CHECK-NEXT: ---
+; CHECK-NEXT: amdpal.pipelines:
+; CHECK-NEXT: - .api: Vulkan
+; CHECK-NEXT: .compute_registers:
+; CHECK-NEXT: .dynamic_vgpr_en: true
+; CHECK-NEXT: .tg_size_en: true
+; CHECK-NEXT: .tgid_x_en: false
+; CHECK-NEXT: .tgid_y_en: false
+; CHECK-NEXT: .tgid_z_en: false
+; CHECK-NEXT: .tidig_comp_cnt: 0x1
+; CHECK-NEXT: .graphics_registers:
+; CHECK-NEXT: .ps_extra_lds_size: 0
+; CHECK-NEXT: .spi_ps_input_addr:
+; CHECK-NEXT: .ancillary_ena: false
+; CHECK-NEXT: .front_face_ena: true
+; CHECK-NEXT: .line_stipple_tex_ena: false
+; CHECK-NEXT: .linear_center_ena: true
+; CHECK-NEXT: .linear_centroid_ena: true
+; CHECK-NEXT: .linear_sample_ena: true
+; CHECK-NEXT: .persp_center_ena: true
+; CHECK-NEXT: .persp_centroid_ena: true
+; CHECK-NEXT: .persp_pull_model_ena: false
+; CHECK-NEXT: .persp_sample_ena: true
+; CHECK-NEXT: .pos_fixed_pt_ena: true
+; CHECK-NEXT: .pos_w_float_ena: false
+; CHECK-NEXT: .pos_x_float_ena: false
+; CHECK-NEXT: .pos_y_float_ena: false
+; CHECK-NEXT: .pos_z_float_ena: false
+; CHECK-NEXT: .sample_coverage_ena: false
+; CHECK-NEXT: .spi_ps_input_ena:
+; CHECK-NEXT: .ancillary_ena: false
+; CHECK-NEXT: .front_face_ena: false
+; CHECK-NEXT: .line_stipple_tex_ena: false
+; CHECK-NEXT: .linear_center_ena: false
+; CHECK-NEXT: .linear_centroid_ena: false
+; CHECK-NEXT: .linear_sample_ena: false
+; CHECK-NEXT: .persp_center_ena: false
+; CHECK-NEXT: .persp_centroid_ena: false
+; CHECK-NEXT: .persp_pull_model_ena: false
+; CHECK-NEXT: .persp_sample_ena: true
+; CHECK-NEXT: .pos_fixed_pt_ena: false
+; CHECK-NEXT: .pos_w_float_ena: false
+; CHECK-NEXT: .pos_x_float_ena: false
+; CHECK-NEXT: .pos_y_float_ena: false
+; CHECK-NEXT: .pos_z_float_ena: false
+; CHECK-NEXT: .sample_coverage_ena: false
+; CHECK-NEXT: .hardware_stages:
+; CHECK-NEXT: .cs:
+; CHECK-NEXT: .checksum_value: 0x9444d7d0
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NEXT: .dynamic_vgpr_saved_count: 0x70
+; CHECK-NOT: .entry_point: _amdgpu_cs_main
+; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
+; CHECK-NEXT: .excp_en: 0
+; CHECK-NEXT: .float_mode: 0xc0
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .image_op: false
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_limit: 0x6a
+; CHECK-NEXT: .threadgroup_dimensions:
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: - 0x400
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: .trap_present: false
+; CHECK-NEXT: .user_data_reg_map:
+; CHECK-NEXT: - 0x10000000
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: .user_sgprs: 0x3
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: .vgpr_limit: 0x100
+; CHECK-NEXT: .wavefront_size: 0x40
+; CHECK-NEXT: .wgp_mode: false
+; CHECK-NEXT: .gs:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_gs_main
+; CHECK-NEXT: .entry_point_symbol: gs_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0x200
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK-NEXT: .hs:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_hs_main
+; CHECK-NEXT: .entry_point_symbol: hs_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0x1000
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK-NEXT: .ps:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_ps_main
+; CHECK-NEXT: .entry_point_symbol: ps_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK: .registers: {}
+; CHECK:amdpal.version:
+; CHECK-NEXT: - 0x3
+; CHECK-NEXT: - 0x6
+; CHECK-NEXT:...
+; CHECK-NEXT: .end_amdgpu_pal_metadata
+
+define dllexport amdgpu_cs void @_amdgpu_cs_main(i32 inreg %arg1, i32 %arg2) #0 !lgc.shaderstage !1 {
+.entry:
+ %i = call i64 @llvm.amdgcn.s.getpc()
+ %i1 = and i64 %i, -4294967296
+ %i2 = zext i32 %arg1 to i64
+ %i3 = or i64 %i1, %i2
+ %i4 = inttoptr i64 %i3 to ptr addrspace(4)
+ %i5 = and i32 %arg2, 1023
+ %i6 = lshr i32 %arg2, 10
+ %i7 = and i32 %i6, 1023
+ %i8 = add nuw nsw i32 %i7, %i5
+ %i9 = load <4 x i32>, ptr addrspace(4) %i4, align 16
+ %.idx = shl nuw nsw i32 %i8, 2
+ call void @llvm.amdgcn.raw.buffer.store.i32(i32 1, <4 x i32> %i9, i32 %.idx, i32 0, i32 0)
+ ret void
+}
+
+define dllexport amdgpu_ps void @ps_shader() #1 {
+ ret void
+}
+
+@LDS.GS = external addrspace(3) global [1 x i32], align 4
+
+define dllexport amdgpu_gs void @gs_shader() {
+ %ptr = getelementptr i32, ptr addrspace(3) @LDS.GS, i32 0
+ store i32 0, ptr addrspace(3) %ptr, align 4
+ ret void
+}
+
+@LDS.HS = external addrspace(3) global [1024 x i32], align 4
+
+define dllexport amdgpu_hs void @hs_shader() {
+ %ptr = getelementptr i32, ptr addrspace(3) @LDS.HS, i32 0
+ store i32 0, ptr addrspace(3) %ptr, align 4
+ ret void
+}
+
+!amdgpu.pal.metadata.msgpack = !{!0}
+
+attributes #0 = { nounwind memory(readwrite) "target-features"=",+wavefrontsize64,+cumode" "amdgpu-dynamic-vgpr-block-size"="16" }
+
+attributes #1 = { nounwind memory(readwrite) "InitialPSInputAddr"="36983" "amdgpu-dynamic-vgpr-block-size"="16" }
+
+!0 = !{!"\82\B0amdpal.pipelines\91\8A\A4.api\A6Vulkan\B2.compute_registers\85\AB.tg_size_en\C3\AA.tgid_x_en\C2\AA.tgid_y_en\C2\AA.tgid_z_en\C2\AF.tidig_comp_cnt\01\B0.hardware_stages\81\A3.cs\8C\AF.checksum_value\CE\94D\D7\D0\AB.debug_mode\00\AB.float_mode\CC\C0\A9.image_op\C2\AC.mem_ordered\C3\AB.sgpr_limitj\B7.threadgroup_dimensions\93\01\CD\04\00\01\AD.trap_present\00\B2.user_data_reg_map\DC\00 \CE\10\00\00\00\CE\FF\FF\FF\FF\00\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\AB.user_sgprs\03\AB.vgpr_limit\CD\01\00\AF.wavefront_size@\B7.internal_pipeline_hash\92\CF\E7\10k\A6:\A6%\F7\CF\B2\1F\1A\D4{\DA\E1T\AA.registers\80\A8.shaders\81\A8.compute\82\B0.api_shader_hash\92\CF\E9Zn7}\1E\B9\E7\00\B1.hardware_mapping\91\A3.cs\B0.spill_threshold\CE\FF\FF\FF\FF\A5.type\A2Cs\B0.user_data_limit\01\AF.xgl_cache_info\82\B3.128_bit_cache_hash\92\CF\B4X\B8\11[\A4\88P\CF\A0;\B0\AF\FF\B4\BE\C0\AD.llpc_version\A461.1\AEamdpal.version\92\03\06"}
+!1 = !{i32 7}
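(Note the configuration shift this new test demonstrates: where the deleted RUN lines in the neighboring tests enabled dynamic VGPRs with -mattr=+dynamic-vgpr, this test drives the same .dynamic_vgpr_en / .dynamic_vgpr_saved_count metadata from a per-function attribute. A minimal sketch of that pattern, with an illustrative kernel name:

    define amdgpu_cs void @k() #0 {
      ret void
    }
    attributes #0 = { "amdgpu-dynamic-vgpr-block-size"="16" }
)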
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
index 830872a..d2f26e8 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
@@ -1,17 +1,14 @@
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
; CHECK-LABEL: {{^}}_amdgpu_cs_main:
-; NODVGPR: ; TotalNumSgprs: 4
-; DVGPR: ; TotalNumSgprs: 34
+; CHECK: ; TotalNumSgprs: 4
; CHECK: ; NumVgprs: 2
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
@@ -57,7 +54,6 @@
; CHECK-NEXT: .cs:
; CHECK-NEXT: .checksum_value: 0x9444d7d0
; CHECK-NEXT: .debug_mode: false
-; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70
; CHECK-NOT: .entry_point: _amdgpu_cs_main
; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
; CHECK-NEXT: .excp_en: 0
@@ -69,8 +65,7 @@
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
; CHECK-NEXT: .scratch_memory_size: 0
-; NODVGPR-NEXT: .sgpr_count: 0x4
-; DVGPR-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_count: 0x4
; CHECK-NEXT: .sgpr_limit: 0x6a
; CHECK-NEXT: .threadgroup_dimensions:
; CHECK-NEXT: - 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
index c82b341..5bc9cdb 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -256,7 +256,7 @@ endif: ; preds = %else, %if
define amdgpu_kernel void @copy1(ptr addrspace(1) %out, ptr addrspace(1) %in0) {
entry:
%tmp = load float, ptr addrspace(1) %in0
- %tmp1 = fcmp oeq float %tmp, 0.000000e+00
+ %tmp1 = fcmp one float %tmp, 0.000000e+00
br i1 %tmp1, label %if0, label %endif
if0: ; preds = %entry
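(On the predicate change above: both oeq and one are ordered comparisons, i.e. false when either operand is NaN; oeq is additionally true only when the operands are equal, one only when they differ. So after this edit the branch to %if0 is taken for any non-NaN, non-zero load:

    %tmp1 = fcmp one float %tmp, 0.000000e+00   ; true iff %tmp is ordered and != 0.0
)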
diff --git a/llvm/test/CodeGen/ARM/combine-movc-sub.ll b/llvm/test/CodeGen/ARM/combine-movc-sub.ll
index ca5d089..8ca4c43 100644
--- a/llvm/test/CodeGen/ARM/combine-movc-sub.ll
+++ b/llvm/test/CodeGen/ARM/combine-movc-sub.ll
@@ -27,11 +27,11 @@ define hidden fastcc ptr @test(ptr %Search, ptr %ClauseList, i32 %Level, ptr noc
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: sub.w r7, r2, #32
-; CHECK-NEXT: mov r8, r0
+; CHECK-NEXT: sub.w r8, r2, #32
+; CHECK-NEXT: mov r6, r0
; CHECK-NEXT: movs r0, #1
; CHECK-NEXT: mov r4, r2
-; CHECK-NEXT: add.w r6, r0, r7, lsr #5
+; CHECK-NEXT: add.w r7, r0, r8, lsr #5
; CHECK-NEXT: mov r5, r1
; CHECK-NEXT: mov.w r9, #0
; CHECK-NEXT: b .LBB0_2
@@ -44,16 +44,16 @@ define hidden fastcc ptr @test(ptr %Search, ptr %ClauseList, i32 %Level, ptr noc
; CHECK-NEXT: mov r2, r4
; CHECK-NEXT: cmp r4, #31
; CHECK-NEXT: ldr r0, [r1, #16]
-; CHECK-NEXT: add.w r0, r0, r6, lsl #2
+; CHECK-NEXT: add.w r0, r0, r7, lsl #2
; CHECK-NEXT: ldr r0, [r0, #40]
; CHECK-NEXT: it hi
-; CHECK-NEXT: andhi r2, r7, #31
+; CHECK-NEXT: andhi r2, r8, #31
; CHECK-NEXT: lsrs r0, r2
; CHECK-NEXT: lsls r0, r0, #31
; CHECK-NEXT: beq .LBB0_1
; CHECK-NEXT: @ %bb.3: @ %if.then
; CHECK-NEXT: @ in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: mov r0, r8
+; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: bl foo
; CHECK-NEXT: str.w r9, [r5, #4]
; CHECK-NEXT: b .LBB0_1
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
index 77deaa5..d717806 100644
--- a/llvm/test/CodeGen/ARM/extract-bits.ll
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -316,28 +316,28 @@ define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
;
; V7A-LABEL: bextr64_a0:
; V7A: @ %bb.0:
-; V7A-NEXT: .save {r4, lr}
-; V7A-NEXT: push {r4, lr}
-; V7A-NEXT: ldr r12, [sp, #8]
-; V7A-NEXT: mov lr, #1
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr lr, [sp, #16]
+; V7A-NEXT: mov r5, #1
; V7A-NEXT: lsr r0, r0, r2
-; V7A-NEXT: rsb r3, r12, #32
-; V7A-NEXT: subs r4, r12, #32
-; V7A-NEXT: lsr r3, lr, r3
-; V7A-NEXT: lslpl r3, lr, r4
-; V7A-NEXT: lsl r4, lr, r12
-; V7A-NEXT: movwpl r4, #0
-; V7A-NEXT: subs r4, r4, #1
-; V7A-NEXT: sbc r12, r3, #0
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: orr r0, r0, r1, lsl r3
-; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: rsb r12, lr, #32
+; V7A-NEXT: subs r4, lr, #32
+; V7A-NEXT: lsr r3, r5, r12
+; V7A-NEXT: lslpl r3, r5, r4
+; V7A-NEXT: lsl r5, r5, lr
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: rsb r4, r2, #32
+; V7A-NEXT: subs r5, r5, #1
+; V7A-NEXT: sbc r3, r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r4
; V7A-NEXT: lsr r1, r1, r2
; V7A-NEXT: movwpl r1, #0
-; V7A-NEXT: and r0, r4, r0
-; V7A-NEXT: and r1, r12, r1
-; V7A-NEXT: pop {r4, pc}
+; V7A-NEXT: and r0, r5, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r4, r5, r11, pc}
;
; V7A-T-LABEL: bextr64_a0:
; V7A-T: @ %bb.0:
@@ -434,28 +434,28 @@ define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) n
;
; V7A-LABEL: bextr64_a0_arithmetic:
; V7A: @ %bb.0:
-; V7A-NEXT: .save {r4, lr}
-; V7A-NEXT: push {r4, lr}
-; V7A-NEXT: ldr r12, [sp, #8]
-; V7A-NEXT: mov lr, #1
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr lr, [sp, #16]
+; V7A-NEXT: mov r5, #1
; V7A-NEXT: lsr r0, r0, r2
-; V7A-NEXT: rsb r3, r12, #32
-; V7A-NEXT: subs r4, r12, #32
-; V7A-NEXT: lsr r3, lr, r3
-; V7A-NEXT: lslpl r3, lr, r4
-; V7A-NEXT: lsl r4, lr, r12
-; V7A-NEXT: movwpl r4, #0
-; V7A-NEXT: subs r4, r4, #1
-; V7A-NEXT: sbc r12, r3, #0
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: orr r0, r0, r1, lsl r3
-; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: rsb r12, lr, #32
+; V7A-NEXT: subs r4, lr, #32
+; V7A-NEXT: lsr r3, r5, r12
+; V7A-NEXT: lslpl r3, r5, r4
+; V7A-NEXT: lsl r5, r5, lr
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: rsb r4, r2, #32
+; V7A-NEXT: subs r5, r5, #1
+; V7A-NEXT: sbc r3, r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: subs r4, r2, #32
; V7A-NEXT: asr r2, r1, r2
-; V7A-NEXT: asrpl r0, r1, r3
; V7A-NEXT: asrpl r2, r1, #31
-; V7A-NEXT: and r0, r4, r0
-; V7A-NEXT: and r1, r12, r2
-; V7A-NEXT: pop {r4, pc}
+; V7A-NEXT: asrpl r0, r1, r4
+; V7A-NEXT: and r1, r3, r2
+; V7A-NEXT: and r0, r5, r0
+; V7A-NEXT: pop {r4, r5, r11, pc}
;
; V7A-T-LABEL: bextr64_a0_arithmetic:
; V7A-T: @ %bb.0:
@@ -911,28 +911,28 @@ define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
;
; V7A-LABEL: bextr64_a4_commutative:
; V7A: @ %bb.0:
-; V7A-NEXT: .save {r4, lr}
-; V7A-NEXT: push {r4, lr}
-; V7A-NEXT: ldr r12, [sp, #8]
-; V7A-NEXT: mov lr, #1
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr lr, [sp, #16]
+; V7A-NEXT: mov r5, #1
; V7A-NEXT: lsr r0, r0, r2
-; V7A-NEXT: rsb r3, r12, #32
-; V7A-NEXT: subs r4, r12, #32
-; V7A-NEXT: lsr r3, lr, r3
-; V7A-NEXT: lslpl r3, lr, r4
-; V7A-NEXT: lsl r4, lr, r12
-; V7A-NEXT: movwpl r4, #0
-; V7A-NEXT: subs r4, r4, #1
-; V7A-NEXT: sbc r12, r3, #0
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: orr r0, r0, r1, lsl r3
-; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: rsb r12, lr, #32
+; V7A-NEXT: subs r4, lr, #32
+; V7A-NEXT: lsr r3, r5, r12
+; V7A-NEXT: lslpl r3, r5, r4
+; V7A-NEXT: lsl r5, r5, lr
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: rsb r4, r2, #32
+; V7A-NEXT: subs r5, r5, #1
+; V7A-NEXT: sbc r3, r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r4
; V7A-NEXT: lsr r1, r1, r2
; V7A-NEXT: movwpl r1, #0
-; V7A-NEXT: and r0, r0, r4
-; V7A-NEXT: and r1, r1, r12
-; V7A-NEXT: pop {r4, pc}
+; V7A-NEXT: and r0, r0, r5
+; V7A-NEXT: and r1, r1, r3
+; V7A-NEXT: pop {r4, r5, r11, pc}
;
; V7A-T-LABEL: bextr64_a4_commutative:
; V7A-T: @ %bb.0:
@@ -3456,22 +3456,22 @@ define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
; V7M-NEXT: uxtb r2, r2
; V7M-NEXT: it pl
; V7M-NEXT: movpl r1, #0
-; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: rsb.w r3, r2, #32
; V7M-NEXT: lsls r1, r2
-; V7M-NEXT: sub.w r3, r2, #32
-; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: sub.w r12, r2, #32
+; V7M-NEXT: lsr.w r4, r0, r3
; V7M-NEXT: orrs r1, r4
-; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: cmp.w r12, #0
; V7M-NEXT: it pl
-; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lslpl.w r1, r0, r12
; V7M-NEXT: lsl.w r0, r0, r2
-; V7M-NEXT: lsl.w r4, r1, r12
+; V7M-NEXT: lsl.w r3, r1, r3
; V7M-NEXT: it pl
; V7M-NEXT: movpl r0, #0
; V7M-NEXT: lsr.w r0, r0, r2
-; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: orr.w r0, r0, r3
; V7M-NEXT: it pl
-; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsrpl.w r0, r1, r12
; V7M-NEXT: lsr.w r1, r1, r2
; V7M-NEXT: it pl
; V7M-NEXT: movpl r1, #0
@@ -3715,26 +3715,26 @@ define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
; V7M-NEXT: uxtb r2, r2
; V7M-NEXT: lsl.w r0, lr, r0
; V7M-NEXT: orr.w r0, r0, r12
-; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: sub.w r12, r2, #32
; V7M-NEXT: it pl
; V7M-NEXT: lsrpl.w r0, lr, r3
; V7M-NEXT: it pl
; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w r3, r2, #32
; V7M-NEXT: lsls r1, r2
-; V7M-NEXT: sub.w r3, r2, #32
-; V7M-NEXT: lsr.w r4, r0, r12
-; V7M-NEXT: orrs r1, r4
-; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: cmp.w r12, #0
+; V7M-NEXT: lsr.w r4, r0, r3
+; V7M-NEXT: orr.w r1, r1, r4
; V7M-NEXT: it pl
-; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lslpl.w r1, r0, r12
; V7M-NEXT: lsl.w r0, r0, r2
-; V7M-NEXT: lsl.w r4, r1, r12
; V7M-NEXT: it pl
; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r1, r3
; V7M-NEXT: lsr.w r0, r0, r2
-; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: orr.w r0, r0, r3
; V7M-NEXT: it pl
-; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsrpl.w r0, r1, r12
; V7M-NEXT: lsr.w r1, r1, r2
; V7M-NEXT: it pl
; V7M-NEXT: movpl r1, #0
diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll
index b483793..373d998 100644
--- a/llvm/test/CodeGen/ARM/extract-lowbits.ll
+++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll
@@ -243,15 +243,15 @@ define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
; V7A: @ %bb.0:
; V7A-NEXT: .save {r11, lr}
; V7A-NEXT: push {r11, lr}
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: mov r12, #1
-; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: rsb r12, r2, #32
+; V7A-NEXT: mov lr, #1
; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: lsl r2, lr, r2
+; V7A-NEXT: lsr r12, lr, r12
; V7A-NEXT: movwpl r2, #0
-; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: lslpl r12, lr, r3
; V7A-NEXT: subs r2, r2, #1
-; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: sbc r3, r12, #0
; V7A-NEXT: and r0, r2, r0
; V7A-NEXT: and r1, r3, r1
; V7A-NEXT: pop {r11, pc}
@@ -323,15 +323,15 @@ define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
; V7A-NEXT: .save {r11, lr}
; V7A-NEXT: push {r11, lr}
; V7A-NEXT: and r2, r2, #63
-; V7A-NEXT: mov r12, #1
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: rsb r12, r2, #32
; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: lsl r2, lr, r2
+; V7A-NEXT: lsr r12, lr, r12
; V7A-NEXT: movwpl r2, #0
-; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: lslpl r12, lr, r3
; V7A-NEXT: subs r2, r2, #1
-; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: sbc r3, r12, #0
; V7A-NEXT: and r0, r2, r0
; V7A-NEXT: and r1, r3, r1
; V7A-NEXT: pop {r11, pc}
@@ -404,15 +404,15 @@ define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
; V7A: @ %bb.0:
; V7A-NEXT: .save {r11, lr}
; V7A-NEXT: push {r11, lr}
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: mov r12, #1
-; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: rsb r12, r2, #32
+; V7A-NEXT: mov lr, #1
; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: lsl r2, lr, r2
+; V7A-NEXT: lsr r12, lr, r12
; V7A-NEXT: movwpl r2, #0
-; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: lslpl r12, lr, r3
; V7A-NEXT: subs r2, r2, #1
-; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: sbc r3, r12, #0
; V7A-NEXT: and r0, r2, r0
; V7A-NEXT: and r1, r3, r1
; V7A-NEXT: pop {r11, pc}
@@ -644,15 +644,15 @@ define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
; V7A: @ %bb.0:
; V7A-NEXT: .save {r11, lr}
; V7A-NEXT: push {r11, lr}
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: mov r12, #1
-; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: rsb r12, r2, #32
+; V7A-NEXT: mov lr, #1
; V7A-NEXT: subs r3, r2, #32
-; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: lsl r2, lr, r2
+; V7A-NEXT: lsr r12, lr, r12
; V7A-NEXT: movwpl r2, #0
-; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: lslpl r12, lr, r3
; V7A-NEXT: subs r2, r2, #1
-; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: sbc r3, r12, #0
; V7A-NEXT: and r0, r0, r2
; V7A-NEXT: and r1, r1, r3
; V7A-NEXT: pop {r11, pc}
@@ -2144,23 +2144,23 @@ define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
;
; V7A-LABEL: bzhi64_d2_load:
; V7A: @ %bb.0:
-; V7A-NEXT: .save {r5, r7, r11, lr}
-; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: .save {r5, lr}
+; V7A-NEXT: push {r5, lr}
; V7A-NEXT: rsb r3, r2, #64
-; V7A-NEXT: ldm r0, {r0, r7}
-; V7A-NEXT: rsb r1, r3, #32
+; V7A-NEXT: ldm r0, {r0, r5}
+; V7A-NEXT: rsb r12, r3, #32
; V7A-NEXT: rsbs r2, r2, #32
-; V7A-NEXT: lsr r5, r0, r1
-; V7A-NEXT: orr r7, r5, r7, lsl r3
-; V7A-NEXT: lslpl r7, r0, r2
+; V7A-NEXT: lsr r1, r0, r12
+; V7A-NEXT: orr r1, r1, r5, lsl r3
+; V7A-NEXT: lslpl r1, r0, r2
; V7A-NEXT: lsl r0, r0, r3
; V7A-NEXT: movwpl r0, #0
; V7A-NEXT: lsr r0, r0, r3
-; V7A-NEXT: orr r0, r0, r7, lsl r1
-; V7A-NEXT: lsr r1, r7, r3
-; V7A-NEXT: lsrpl r0, r7, r2
+; V7A-NEXT: orr r0, r0, r1, lsl r12
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, r3
; V7A-NEXT: movwpl r1, #0
-; V7A-NEXT: pop {r5, r7, r11, pc}
+; V7A-NEXT: pop {r5, pc}
;
; V7A-T-LABEL: bzhi64_d2_load:
; V7A-T: @ %bb.0:
@@ -2237,26 +2237,26 @@ define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
;
; V7A-LABEL: bzhi64_d3_load_indexzext:
; V7A: @ %bb.0:
-; V7A-NEXT: .save {r5, r7, r11, lr}
-; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: .save {r5, lr}
+; V7A-NEXT: push {r5, lr}
; V7A-NEXT: rsb r1, r1, #64
-; V7A-NEXT: ldm r0, {r0, r7}
+; V7A-NEXT: ldm r0, {r0, r5}
; V7A-NEXT: uxtb r2, r1
-; V7A-NEXT: rsb r3, r2, #32
-; V7A-NEXT: lsr r5, r0, r3
-; V7A-NEXT: orr r7, r5, r7, lsl r2
+; V7A-NEXT: rsb r12, r2, #32
+; V7A-NEXT: lsr r3, r0, r12
+; V7A-NEXT: orr r3, r3, r5, lsl r2
; V7A-NEXT: mvn r5, #31
; V7A-NEXT: uxtab r1, r5, r1
; V7A-NEXT: cmp r1, #0
-; V7A-NEXT: lslpl r7, r0, r1
+; V7A-NEXT: lslpl r3, r0, r1
; V7A-NEXT: lsl r0, r0, r2
; V7A-NEXT: movwpl r0, #0
; V7A-NEXT: lsr r0, r0, r2
-; V7A-NEXT: orr r0, r0, r7, lsl r3
-; V7A-NEXT: lsrpl r0, r7, r1
-; V7A-NEXT: lsr r1, r7, r2
+; V7A-NEXT: orr r0, r0, r3, lsl r12
+; V7A-NEXT: lsrpl r0, r3, r1
+; V7A-NEXT: lsr r1, r3, r2
; V7A-NEXT: movwpl r1, #0
-; V7A-NEXT: pop {r5, r7, r11, pc}
+; V7A-NEXT: pop {r5, pc}
;
; V7A-T-LABEL: bzhi64_d3_load_indexzext:
; V7A-T: @ %bb.0:
diff --git a/llvm/test/CodeGen/ARM/llround-conv.ll b/llvm/test/CodeGen/ARM/llround-conv.ll
index 0f57e4a..f734db8 100644
--- a/llvm/test/CodeGen/ARM/llround-conv.ll
+++ b/llvm/test/CodeGen/ARM/llround-conv.ll
@@ -1,25 +1,71 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+
+define i64 @testmsxh_builtin(half %x) {
+; CHECK-SOFT-LABEL: testmsxh_builtin:
+; CHECK-SOFT: @ %bb.0: @ %entry
+; CHECK-SOFT-NEXT: .save {r11, lr}
+; CHECK-SOFT-NEXT: push {r11, lr}
+; CHECK-SOFT-NEXT: bl __aeabi_h2f
+; CHECK-SOFT-NEXT: bl llroundf
+; CHECK-SOFT-NEXT: pop {r11, pc}
+;
+; CHECK-NOFP16-LABEL: testmsxh_builtin:
+; CHECK-NOFP16: @ %bb.0: @ %entry
+; CHECK-NOFP16-NEXT: .save {r11, lr}
+; CHECK-NOFP16-NEXT: push {r11, lr}
+; CHECK-NOFP16-NEXT: vmov r0, s0
+; CHECK-NOFP16-NEXT: bl __aeabi_h2f
+; CHECK-NOFP16-NEXT: vmov s0, r0
+; CHECK-NOFP16-NEXT: bl llroundf
+; CHECK-NOFP16-NEXT: pop {r11, pc}
+;
+; CHECK-FP16-LABEL: testmsxh_builtin:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: .save {r11, lr}
+; CHECK-FP16-NEXT: push {r11, lr}
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl llroundf
+; CHECK-FP16-NEXT: pop {r11, pc}
+entry:
+ %0 = tail call i64 @llvm.llround.i64.f16(half %x)
+ ret i64 %0
+}
-; SOFTFP-LABEL: testmsxs_builtin:
-; SOFTFP: bl llroundf
-; HARDFP-LABEL: testmsxs_builtin:
-; HARDFP: bl llroundf
define i64 @testmsxs_builtin(float %x) {
+; CHECK-LABEL: testmsxs_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llroundf
+; CHECK-NEXT: pop {r11, pc}
entry:
- %0 = tail call i64 @llvm.llround.f32(float %x)
+ %0 = tail call i64 @llvm.llround.i64.f32(float %x)
ret i64 %0
}
-; SOFTFP-LABEL: testmsxd_builtin:
-; SOFTFP: bl llround
-; HARDFP-LABEL: testmsxd_builtin:
-; HARDFP: bl llround
define i64 @testmsxd_builtin(double %x) {
+; CHECK-LABEL: testmsxd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llround
+; CHECK-NEXT: pop {r11, pc}
entry:
- %0 = tail call i64 @llvm.llround.f64(double %x)
+ %0 = tail call i64 @llvm.llround.i64.f64(double %x)
ret i64 %0
}
-declare i64 @llvm.llround.f32(float) nounwind readnone
-declare i64 @llvm.llround.f64(double) nounwind readnone
+define i64 @testmsxq_builtin(fp128 %x) {
+; CHECK-LABEL: testmsxq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llroundl
+; CHECK-NEXT: pop {r11, pc}
+entry:
+ %0 = tail call i64 @llvm.llround.i64.f128(fp128 %x)
+ ret i64 %0
+}
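(The declaration churn above follows the standard mangling rule for overloaded intrinsics: each overloaded type appears in the name's suffix, return type first. llvm.llround is overloaded on both its result and its operand, so the fully mangled forms look like:

    declare i64 @llvm.llround.i64.f32(float)
    declare i64 @llvm.llround.i64.f64(double)
    declare i64 @llvm.llround.i64.f128(fp128)

The older llvm.llround.f32 spelling mangled only the operand type.)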
diff --git a/llvm/test/CodeGen/ARM/lround-conv.ll b/llvm/test/CodeGen/ARM/lround-conv.ll
index 3aaed74..03f7a0d 100644
--- a/llvm/test/CodeGen/ARM/lround-conv.ll
+++ b/llvm/test/CodeGen/ARM/lround-conv.ll
@@ -1,25 +1,47 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FPv8
+; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+
+;define i32 @testmswh_builtin(half %x) {
+;entry:
+; %0 = tail call i32 @llvm.lround.i32.f16(half %x)
+; ret i32 %0
+;}
-; SOFTFP-LABEL: testmsws_builtin:
-; SOFTFP: bl lroundf
-; HARDFP-LABEL: testmsws_builtin:
-; HARDFP: bl lroundf
define i32 @testmsws_builtin(float %x) {
+; CHECK-LABEL: testmsws_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lroundf
entry:
%0 = tail call i32 @llvm.lround.i32.f32(float %x)
ret i32 %0
}
-; SOFTFP-LABEL: testmswd_builtin:
-; SOFTFP: bl lround
-; HARDFP-LABEL: testmswd_builtin:
-; HARDFP: bl lround
define i32 @testmswd_builtin(double %x) {
+; CHECK-LABEL: testmswd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lround
entry:
%0 = tail call i32 @llvm.lround.i32.f64(double %x)
ret i32 %0
}
-declare i32 @llvm.lround.i32.f32(float) nounwind readnone
-declare i32 @llvm.lround.i32.f64(double) nounwind readnone
+define i32 @testmswq_builtin(fp128 %x) {
+; CHECK-LABEL: testmswq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl lroundl
+; CHECK-NEXT: pop {r11, pc}
+entry:
+ %0 = tail call i32 @llvm.lround.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-FP16: {{.*}}
+; CHECK-FPv8: {{.*}}
+; CHECK-NOFP16: {{.*}}
+; CHECK-SOFT: {{.*}}
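
Note: the i32-returning cases above can tail-call the libcall outright (`b lroundf`), since the result comes back where the caller expects it, while the fp128 case has to set up a frame for `bl lroundl`. As a reminder of the semantics these libcalls implement, here is a minimal Python model of round-to-nearest with ties away from zero; the helper name is illustrative and not part of the test:

    import math

    def lround(x: float) -> int:
        # Round to nearest integer, halfway cases away from zero,
        # matching C's lround()/llround() for in-range, finite inputs.
        return int(math.floor(x + 0.5)) if x >= 0 else int(math.ceil(x - 0.5))

    assert lround(2.5) == 3 and lround(-2.5) == -3
    assert lround(2.4) == 2 and lround(-2.4) == -2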
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-Flag-LargeNumber.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-Flag-LargeNumber.ll
new file mode 100644
index 0000000..c27c87f
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-Flag-LargeNumber.ll
@@ -0,0 +1,20 @@
+; RUN: not opt -passes='print<dxil-root-signature>' %s -S -o - 2>&1 | FileCheck %s
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+; CHECK: error: Invalid value for DescriptorFlag: 66666
+; CHECK-NOT: Root Signature Definitions
+
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 2 } ; function, root signature, version
+!3 = !{ !5 } ; list of root signature elements
+!5 = !{ !"DescriptorTable", i32 0, !6, !7 }
+!6 = !{ !"SRV", i32 1, i32 1, i32 0, i32 -1, i32 66666 }
+!7 = !{ !"UAV", i32 5, i32 1, i32 10, i32 5, i32 2 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-Flags-LargeNumber.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-Flags-LargeNumber.ll
new file mode 100644
index 0000000..898e197
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-Flags-LargeNumber.ll
@@ -0,0 +1,18 @@
+; RUN: not opt -passes='print<dxil-root-signature>' %s -S -o - 2>&1 | FileCheck %s
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+
+; CHECK: error: Invalid value for RootDescriptorFlag: 666
+; CHECK-NOT: Root Signature Definitions
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 2 } ; function, root signature, version
+!3 = !{ !5 } ; list of root signature elements
+!5 = !{ !"RootCBV", i32 0, i32 1, i32 2, i32 666 }
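
Both new tests drive an out-of-range flag value through the root-signature metadata and expect the analysis to fail before printing any definitions. A toy sketch of this style of flag validation, in Python; the mask value is invented for illustration and is not the DXIL definition:

    DEFINED_FLAG_BITS = 0x7  # hypothetical: pretend only three flag bits exist

    def validate_flags(value: int, kind: str) -> None:
        # Reject any value with bits outside the defined set, mirroring
        # diagnostics like "Invalid value for DescriptorFlag: 66666".
        if value & ~DEFINED_FLAG_BITS:
            raise ValueError(f"Invalid value for {kind}: {value}")

    validate_flags(2, "RootDescriptorFlag")      # accepted
    try:
        validate_flags(66666, "DescriptorFlag")  # rejected, as in the tests
    except ValueError as err:
        print(err)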
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm103a.ll b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll
new file mode 100644
index 0000000..54b4dd8
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll
@@ -0,0 +1,297 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | FileCheck %s
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | FileCheck %s
+; RUN: %if ptxas-sm_100a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-sm_103a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | %ptxas-verify -arch=sm_103a %}
+
+; F16X2 conversions
+
+define <2 x half> @cvt_rs_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_relu_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.satfinite.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_relu_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.satfinite.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+; BF16X2 conversions
+
+define <2 x bfloat> @cvt_rs_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_relu_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_relu_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+; F8X4 conversions
+
+define <4 x i8> @cvt_rs_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e4m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e4m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e4m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e4m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e4m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e4m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_sf_e5m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e5m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e5m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e5m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e5m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e5m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e5m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e5m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+; F6X4 conversions
+
+define <4 x i8> @cvt_rs_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e2m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e2m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e3m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e3m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e3m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e3m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e3m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e3m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+; F4X4 conversions
+
+define i16 @cvt_rs_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e2m1x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m1x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m1x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: cvt.u32.u16 %r6, %rs1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret i16 %val
+}
+
+define i16 @cvt_rs_relu_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e2m1x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m1x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m1x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: cvt.u32.u16 %r6, %rs1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret i16 %val
+}
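
Every `.rs` variant above carries a trailing i32 operand (`%rbits`) feeding the stochastic-rounding step of the narrowing conversion. A rough conceptual model of stochastic rounding in Python; it illustrates the idea of consuming random bits below the truncation point and is not a bit-exact model of the PTX instruction:

    def stochastic_round(mantissa: int, drop_bits: int, rbits: int) -> int:
        # Add random bits below the keep point, then truncate: the result
        # rounds up with probability equal to the discarded fraction.
        return (mantissa + (rbits & ((1 << drop_bits) - 1))) >> drop_bits

    # A value exactly halfway between two representable results:
    print(stochastic_round(0x18000, 16, 0x0000))  # 1 (rounds down)
    print(stochastic_round(0x18000, 16, 0x8000))  # 2 (rounds up)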
diff --git a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
index ae781df..40055ae 100644
--- a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
+++ b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
@@ -2,7 +2,7 @@
# RUN: %python %s --ptx=87 --gpu-arch=120 --aa > %t-ptx87-sm_120a.ll
# RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \
# RUN: | FileCheck %t-ptx87-sm_120a.ll
-# RUN: %if ptxas-12.7 %{ \
+# RUN: %if ptxas-sm_120a && ptxas-isa-8.7 %{ \
# RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \
# RUN: | %ptxas-verify -arch=sm_120a \
# RUN: %}
diff --git a/llvm/test/CodeGen/NVPTX/wmma.py b/llvm/test/CodeGen/NVPTX/wmma.py
index 6d73bce..8427ae4 100644
--- a/llvm/test/CodeGen/NVPTX/wmma.py
+++ b/llvm/test/CodeGen/NVPTX/wmma.py
@@ -90,6 +90,21 @@ class MMAFrag:
"m16n8k32:b:s8": 2,
"m16n8k32:c:s32": 4,
"m16n8k32:d:s32": 4,
+ # e4m3/e5m2/e3m2/e2m3/e2m1 -> f16/f32 @ m16n8k16/m16n8k32
+ "m16n8k16:a:e4m3": 2,
+ "m16n8k16:a:e5m2": 2,
+ "m16n8k32:a:e4m3": 4,
+ "m16n8k32:a:e5m2": 4,
+ "m16n8k32:a:e3m2": 4,
+ "m16n8k32:a:e2m3": 4,
+ "m16n8k32:a:e2m1": 4,
+ "m16n8k16:b:e4m3": 1,
+ "m16n8k16:b:e5m2": 1,
+ "m16n8k32:b:e4m3": 2,
+ "m16n8k32:b:e5m2": 2,
+ "m16n8k32:b:e3m2": 2,
+ "m16n8k32:b:e2m3": 2,
+ "m16n8k32:b:e2m1": 2,
# mma sp
"m16n8k32:a:bf16": 4,
"m16n8k32:a:f16": 4,
@@ -182,6 +197,18 @@ class MMAFrag:
"m8n8k4:b:f64": 1,
"m8n8k4:c:f64": 2,
"m8n8k4:d:f64": 2,
+ "m16n8k4:a:f64": 2,
+ "m16n8k4:b:f64": 1,
+ "m16n8k4:c:f64": 4,
+ "m16n8k4:d:f64": 4,
+ "m16n8k8:a:f64": 4,
+ "m16n8k8:b:f64": 2,
+ "m16n8k8:c:f64": 4,
+ "m16n8k8:d:f64": 4,
+ "m16n8k16:a:f64": 8,
+ "m16n8k16:b:f64": 4,
+ "m16n8k16:c:f64": 4,
+ "m16n8k16:d:f64": 4,
# tf32 -> s32 @ m16n16k8
"m16n16k8:a:tf32": 4,
"m16n16k8:b:tf32": 4,
@@ -324,7 +351,9 @@ def get_wmma_ops():
def get_mma_ops():
return (
- make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+ make_mma_ops(
+ ["m8n8k4", "m16n8k4", "m16n8k8", "m16n8k16"], ["f64"], [], ["f64"], []
+ )
+ make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], [])
+ make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], [])
+ make_mma_ops(
@@ -341,6 +370,20 @@ def get_mma_ops():
["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []
)
+ make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], [])
+ + make_mma_ops(
+ ["m16n8k16"],
+ ["e4m3", "e5m2"],
+ ["e4m3", "e5m2"],
+ ["f16", "f32"],
+ ["f16", "f32"],
+ )
+ + make_mma_ops(
+ ["m16n8k32"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["f16", "f32"],
+ ["f16", "f32"],
+ )
)
@@ -492,7 +535,7 @@ def is_wmma_variant_supported(op, layout_a, layout_b, rnd, satf):
return True
-def is_mma_variant_supported(op, layout_a, layout_b, satf):
+def is_mma_variant_supported(op, layout_a, layout_b, kind, satf):
if not (
is_type_supported(op.a.mma_type.ptx_type) and is_mma_geom_supported(op.a.geom)
):
@@ -516,13 +559,53 @@ def is_mma_variant_supported(op, layout_a, layout_b, satf):
):
return False
+ if (
+ op.a.geom != "m8n8k4"
+ and op.a.mma_type.ptx_type == "f64"
+ and (ptx_version < 78 or gpu_arch < 90)
+ ):
+ return False
+
# C and D type must be the same
- if op.a.geom == "m16n8k16" and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type:
+ if (
+ op.a.geom in ["m16n8k16", "m16n8k32"]
+ and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
+ ):
+ return False
+
+ if (
+ op.a.geom in ["m16n8k16", "m16n8k32"]
+ and any(
+ x in ["e4m3", "e5m2"]
+ for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+ )
+ and ptx_version < 87
+ ):
+ return False
+
+ if kind != "" and not (ptx_version >= 87 and gpu_arch >= 120 and aa):
+ return False
+
+ if kind != "" and (
+ op.a.geom != "m16n8k32"
+ or op.a.mma_type.ptx_type not in ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"]
+ ):
+ return False
+
+ if (
+ kind == ""
+ and op.a.geom in ["m16n8k16", "m16n8k32"]
+ and any(
+ x in ["e3m2", "e2m3", "e2m1"]
+ for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+ )
+ ):
return False
# Require row/col layout for all MMA except m8n8k4 on FP16
if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
return layout_a == "row" and layout_b == "col"
+
return True
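
Condensed, the new gating above says: the `.kind::f8f6f4` qualifier is only generated for m16n8k32 with FP8/FP6/FP4 operand types on PTX >= 8.7, sm_120-or-newer arch-accelerated targets, and the FP6/FP4 types never appear without it. A standalone restatement of just that part of the predicate (it omits the older f64 and FP8 PTX-version checks), with `ptx_version`, `gpu_arch` and `aa` passed explicitly rather than read as globals:

    FP8 = {"e4m3", "e5m2"}
    FP6_FP4 = {"e3m2", "e2m3", "e2m1"}

    def kind_ok(geom, a_type, b_type, kind, ptx_version, gpu_arch, aa):
        if kind:  # ".kind::f8f6f4"
            return (ptx_version >= 87 and gpu_arch >= 120 and aa
                    and geom == "m16n8k32" and a_type in FP8 | FP6_FP4)
        # Without the kind qualifier, FP6/FP4 operand types are rejected.
        return not ({a_type, b_type} & FP6_FP4)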
@@ -937,7 +1020,12 @@ define ${ret_ty} @test_${function}(
"""
test_params = params
- test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+ test_params["intrinsic"] = (
+ Template(intrinsic_template)
+ .substitute(params)
+ .replace("::", ".")
+ .replace("_", ".")
+ )
test_params["function"] = test_params["intrinsic"].replace(".", "_")
test_params["instruction"] = Template(instruction_template).substitute(params)
test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
@@ -1002,16 +1090,20 @@ def gen_wmma_mma_tests():
def gen_mma_tests():
- mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
- mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
+ mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${kind}${satf}.${intrinsic_signature}"
+ mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${kind}${satf}.${ptx_signature}${b1op}"
generated_items = []
- for op, alayout, blayout, satf in product(
- get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""]
+ for op, alayout, blayout, kind, satf in product(
+ get_mma_ops(),
+ ["row", "col"],
+ ["row", "col"],
+ ["", ".kind::f8f6f4"],
+ [".satfinite", ""],
):
- if not is_mma_variant_supported(op, alayout, blayout, satf):
+ if not is_mma_variant_supported(op, alayout, blayout, kind, satf):
continue
for b1op in get_b1_ops(op.a.mma_type.ptx_type):
@@ -1024,6 +1116,7 @@ def gen_mma_tests():
"satf": satf,
"geom": op.a.geom,
"b1op": b1op,
+ "kind": kind,
}
intrinsic_template = mma_intrinsic_template
@@ -1105,9 +1198,9 @@ def is_mma_sp_variant_supported(op, metadata, kind, satf):
):
return False
- # C and D type must be the same for m16n8k16/m16n8k32
+ # C and D type must be the same for m16n8k16/m16n8k32/m16n8k64
if (
- op.a.geom in ["m16n8k16", "m16n8k32"]
+ op.a.geom in ["m16n8k16", "m16n8k32", "m16n8k64"]
and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
):
return False
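
For context, `gen_mma_tests` works by taking the cross product of ops, layouts, the new `kind` qualifier, and `satf`, filtering out unsupported combinations, and substituting the survivors into the intrinsic and instruction name templates, with `::` and `_` rewritten to `.` for the intrinsic name. A trimmed-down sketch of that pattern, omitting the type signature and the support predicate:

    from itertools import product
    from string import Template

    template = "llvm.nvvm.mma.${geom}.${alayout}.${blayout}${kind}${satf}"

    for geom, alayout, blayout, kind, satf in product(
        ["m16n8k32"], ["row"], ["col"], ["", ".kind::f8f6f4"], [".satfinite", ""]
    ):
        intrinsic = (
            Template(template)
            .substitute(geom=geom, alayout=alayout, blayout=blayout,
                        kind=kind, satf=satf)
            .replace("::", ".")
            .replace("_", ".")
        )
        function = intrinsic.replace(".", "_")  # the test function name
        print(intrinsic, "->", function)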
diff --git a/llvm/test/CodeGen/PowerPC/vec-nmsub.ll b/llvm/test/CodeGen/PowerPC/vec-nmsub.ll
new file mode 100644
index 0000000..8f4ac972
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec-nmsub.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs < %s -mcpu=pwr5 -mtriple=ppc32-- -mattr=+altivec | FileCheck %s
+
+define dso_local <4 x float> @intrinsic(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %c) local_unnamed_addr {
+; CHECK-LABEL: intrinsic:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vnmsubfp 2, 2, 3, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call <4 x float> @llvm.ppc.altivec.vnmsubfp(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+ ret <4 x float> %0
+}
+
+define <4 x float> @manual_llvm_fma(<4 x float> %a, <4 x float> %b, <4 x float> %c) unnamed_addr {
+; CHECK-LABEL: manual_llvm_fma:
+; CHECK: # %bb.0: # %start
+; CHECK-NEXT: vnmsubfp 2, 2, 3, 4
+; CHECK-NEXT: blr
+start:
+ %0 = fneg <4 x float> %c
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %0)
+ %2 = fneg <4 x float> %1
+ ret <4 x float> %2
+}
+
+define dso_local <4 x float> @manual_vmaddfp(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %c) local_unnamed_addr {
+; CHECK-LABEL: manual_vmaddfp:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vnmsubfp 2, 2, 3, 4
+; CHECK-NEXT: blr
+entry:
+ %fneg.i3 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.ppc.altivec.vmaddfp(<4 x float> %a, <4 x float> %b, <4 x float> %fneg.i3)
+ %fneg.i = fneg <4 x float> %0
+ ret <4 x float> %fneg.i
+}
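
All three functions select vnmsubfp because they compute the same value per lane: vnmsubfp is -(a*b - c), and negating both the addend and the result turns fneg(fma(a, b, fneg(c))) into exactly that. A quick numeric check of the identity in Python, using a product that is exactly representable so fused and unfused multiply-add agree:

    def vnmsubfp(a, b, c):
        # One lane of vnmsubfp: -(a*b - c)
        return -(a * b - c)

    def manual(a, b, c):
        # The IR pattern above: fneg(fma(a, b, fneg(c)))
        return -((a * b) + (-c))

    a, b, c = 1.5, -2.25, 0.125
    assert vnmsubfp(a, b, c) == manual(a, b, c) == 3.5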
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store-fp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store-fp.ll
new file mode 100644
index 0000000..4ad2d2c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store-fp.ll
@@ -0,0 +1,950 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d,+a,+no-trailing-seq-cst-fence \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32IA,RV32IA-WMO %s
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d,+a,+ztso,+no-trailing-seq-cst-fence \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32IA,RV32IA-TSO %s
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+d,+a,+no-trailing-seq-cst-fence \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64IA,RV64IA-WMO %s
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+d,+a,+ztso,+no-trailing-seq-cst-fence \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64IA,RV64IA-TSO %s
+
+
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d,+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-WMO-TRAILING-FENCE %s
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d,+a,+ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-TSO-TRAILING-FENCE %s
+
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+d,+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO-TRAILING-FENCE %s
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+d,+a,+ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO-TRAILING-FENCE %s
+
+
+define float @atomic_load_f32_unordered(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f32_unordered:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: call __atomic_load_4
+; RV32I-NEXT: fmv.w.x fa0, a0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f32_unordered:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: lw a0, 0(a0)
+; RV32IA-NEXT: fmv.w.x fa0, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f32_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 0
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_load_f32_unordered:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lw a0, 0(a0)
+; RV64IA-NEXT: fmv.w.x fa0, a0
+; RV64IA-NEXT: ret
+ %1 = load atomic float, ptr %a unordered, align 4
+ ret float %1
+}
+
+define float @atomic_load_f32_monotonic(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f32_monotonic:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: call __atomic_load_4
+; RV32I-NEXT: fmv.w.x fa0, a0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f32_monotonic:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: lw a0, 0(a0)
+; RV32IA-NEXT: fmv.w.x fa0, a0
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f32_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 0
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_load_f32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lw a0, 0(a0)
+; RV64IA-NEXT: fmv.w.x fa0, a0
+; RV64IA-NEXT: ret
+ %1 = load atomic float, ptr %a monotonic, align 4
+ ret float %1
+}
+
+define float @atomic_load_f32_acquire(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f32_acquire:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 2
+; RV32I-NEXT: call __atomic_load_4
+; RV32I-NEXT: fmv.w.x fa0, a0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-WMO-LABEL: atomic_load_f32_acquire:
+; RV32IA-WMO: # %bb.0:
+; RV32IA-WMO-NEXT: lw a0, 0(a0)
+; RV32IA-WMO-NEXT: fence r, rw
+; RV32IA-WMO-NEXT: fmv.w.x fa0, a0
+; RV32IA-WMO-NEXT: ret
+;
+; RV32IA-TSO-LABEL: atomic_load_f32_acquire:
+; RV32IA-TSO: # %bb.0:
+; RV32IA-TSO-NEXT: lw a0, 0(a0)
+; RV32IA-TSO-NEXT: fmv.w.x fa0, a0
+; RV32IA-TSO-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f32_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 2
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_load_f32_acquire:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: lw a0, 0(a0)
+; RV64IA-WMO-NEXT: fence r, rw
+; RV64IA-WMO-NEXT: fmv.w.x fa0, a0
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_load_f32_acquire:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: lw a0, 0(a0)
+; RV64IA-TSO-NEXT: fmv.w.x fa0, a0
+; RV64IA-TSO-NEXT: ret
+;
+; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f32_acquire:
+; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f32_acquire:
+; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f32_acquire:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f32_acquire:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ %1 = load atomic float, ptr %a acquire, align 4
+ ret float %1
+}
+
+define float @atomic_load_f32_seq_cst(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f32_seq_cst:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 5
+; RV32I-NEXT: call __atomic_load_4
+; RV32I-NEXT: fmv.w.x fa0, a0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-WMO-LABEL: atomic_load_f32_seq_cst:
+; RV32IA-WMO: # %bb.0:
+; RV32IA-WMO-NEXT: fence rw, rw
+; RV32IA-WMO-NEXT: lw a0, 0(a0)
+; RV32IA-WMO-NEXT: fence r, rw
+; RV32IA-WMO-NEXT: fmv.w.x fa0, a0
+; RV32IA-WMO-NEXT: ret
+;
+; RV32IA-TSO-LABEL: atomic_load_f32_seq_cst:
+; RV32IA-TSO: # %bb.0:
+; RV32IA-TSO-NEXT: fence rw, rw
+; RV32IA-TSO-NEXT: lw a0, 0(a0)
+; RV32IA-TSO-NEXT: fmv.w.x fa0, a0
+; RV32IA-TSO-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f32_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 5
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_load_f32_seq_cst:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, rw
+; RV64IA-WMO-NEXT: lw a0, 0(a0)
+; RV64IA-WMO-NEXT: fence r, rw
+; RV64IA-WMO-NEXT: fmv.w.x fa0, a0
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_load_f32_seq_cst:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fence rw, rw
+; RV64IA-TSO-NEXT: lw a0, 0(a0)
+; RV64IA-TSO-NEXT: fmv.w.x fa0, a0
+; RV64IA-TSO-NEXT: ret
+;
+; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f32_seq_cst:
+; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f32_seq_cst:
+; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f32_seq_cst:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f32_seq_cst:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.w.x fa0, a0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ %1 = load atomic float, ptr %a seq_cst, align 4
+ ret float %1
+}
+
+define double @atomic_load_f64_unordered(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f64_unordered:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: call __atomic_load_8
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: sw a1, 4(sp)
+; RV32I-NEXT: fld fa0, 0(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f64_unordered:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: li a1, 0
+; RV32IA-NEXT: call __atomic_load_8
+; RV32IA-NEXT: sw a0, 0(sp)
+; RV32IA-NEXT: sw a1, 4(sp)
+; RV32IA-NEXT: fld fa0, 0(sp)
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f64_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 0
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_load_f64_unordered:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: ld a0, 0(a0)
+; RV64IA-NEXT: fmv.d.x fa0, a0
+; RV64IA-NEXT: ret
+ %1 = load atomic double, ptr %a unordered, align 8
+ ret double %1
+}
+
+define double @atomic_load_f64_monotonic(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f64_monotonic:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: call __atomic_load_8
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: sw a1, 4(sp)
+; RV32I-NEXT: fld fa0, 0(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f64_monotonic:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: li a1, 0
+; RV32IA-NEXT: call __atomic_load_8
+; RV32IA-NEXT: sw a0, 0(sp)
+; RV32IA-NEXT: sw a1, 4(sp)
+; RV32IA-NEXT: fld fa0, 0(sp)
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f64_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 0
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_load_f64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: ld a0, 0(a0)
+; RV64IA-NEXT: fmv.d.x fa0, a0
+; RV64IA-NEXT: ret
+ %1 = load atomic double, ptr %a monotonic, align 8
+ ret double %1
+}
+
+define double @atomic_load_f64_acquire(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f64_acquire:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 2
+; RV32I-NEXT: call __atomic_load_8
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: sw a1, 4(sp)
+; RV32I-NEXT: fld fa0, 0(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f64_acquire:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: li a1, 2
+; RV32IA-NEXT: call __atomic_load_8
+; RV32IA-NEXT: sw a0, 0(sp)
+; RV32IA-NEXT: sw a1, 4(sp)
+; RV32IA-NEXT: fld fa0, 0(sp)
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f64_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 2
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_load_f64_acquire:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: ld a0, 0(a0)
+; RV64IA-WMO-NEXT: fence r, rw
+; RV64IA-WMO-NEXT: fmv.d.x fa0, a0
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_load_f64_acquire:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: ld a0, 0(a0)
+; RV64IA-TSO-NEXT: fmv.d.x fa0, a0
+; RV64IA-TSO-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f64_acquire:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.d.x fa0, a0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f64_acquire:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.d.x fa0, a0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ %1 = load atomic double, ptr %a acquire, align 8
+ ret double %1
+}
+
+define double @atomic_load_f64_seq_cst(ptr %a) nounwind {
+; RV32I-LABEL: atomic_load_f64_seq_cst:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a1, 5
+; RV32I-NEXT: call __atomic_load_8
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: sw a1, 4(sp)
+; RV32I-NEXT: fld fa0, 0(sp)
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_load_f64_seq_cst:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: li a1, 5
+; RV32IA-NEXT: call __atomic_load_8
+; RV32IA-NEXT: sw a0, 0(sp)
+; RV32IA-NEXT: sw a1, 4(sp)
+; RV32IA-NEXT: fld fa0, 0(sp)
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_f64_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a1, 5
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_load_f64_seq_cst:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, rw
+; RV64IA-WMO-NEXT: ld a0, 0(a0)
+; RV64IA-WMO-NEXT: fence r, rw
+; RV64IA-WMO-NEXT: fmv.d.x fa0, a0
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_load_f64_seq_cst:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fence rw, rw
+; RV64IA-TSO-NEXT: ld a0, 0(a0)
+; RV64IA-TSO-NEXT: fmv.d.x fa0, a0
+; RV64IA-TSO-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_f64_seq_cst:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.d.x fa0, a0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_f64_seq_cst:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.d.x fa0, a0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ %1 = load atomic double, ptr %a seq_cst, align 8
+ ret double %1
+}
+
+define void @atomic_store_f32_unordered(ptr %a, float %b) nounwind {
+; RV32I-LABEL: atomic_store_f32_unordered:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fmv.x.w a1, fa0
+; RV32I-NEXT: li a2, 0
+; RV32I-NEXT: call __atomic_store_4
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f32_unordered:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: fmv.x.w a1, fa0
+; RV32IA-NEXT: sw a1, 0(a0)
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f32_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 0
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_store_f32_unordered:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: fmv.x.w a1, fa0
+; RV64IA-NEXT: sw a1, 0(a0)
+; RV64IA-NEXT: ret
+ store atomic float %b, ptr %a unordered, align 4
+ ret void
+}
+
+define void @atomic_store_f32_monotonic(ptr %a, float %b) nounwind {
+; RV32I-LABEL: atomic_store_f32_monotonic:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fmv.x.w a1, fa0
+; RV32I-NEXT: li a2, 0
+; RV32I-NEXT: call __atomic_store_4
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f32_monotonic:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: fmv.x.w a1, fa0
+; RV32IA-NEXT: sw a1, 0(a0)
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f32_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 0
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_store_f32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: fmv.x.w a1, fa0
+; RV64IA-NEXT: sw a1, 0(a0)
+; RV64IA-NEXT: ret
+ store atomic float %b, ptr %a monotonic, align 4
+ ret void
+}
+
+define void @atomic_store_f32_release(ptr %a, float %b) nounwind {
+; RV32I-LABEL: atomic_store_f32_release:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a2, 3
+; RV32I-NEXT: fmv.x.w a1, fa0
+; RV32I-NEXT: call __atomic_store_4
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-WMO-LABEL: atomic_store_f32_release:
+; RV32IA-WMO: # %bb.0:
+; RV32IA-WMO-NEXT: fence rw, w
+; RV32IA-WMO-NEXT: fmv.x.w a1, fa0
+; RV32IA-WMO-NEXT: sw a1, 0(a0)
+; RV32IA-WMO-NEXT: ret
+;
+; RV32IA-TSO-LABEL: atomic_store_f32_release:
+; RV32IA-TSO: # %bb.0:
+; RV32IA-TSO-NEXT: fmv.x.w a1, fa0
+; RV32IA-TSO-NEXT: sw a1, 0(a0)
+; RV32IA-TSO-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f32_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 3
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_store_f32_release:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, w
+; RV64IA-WMO-NEXT: fmv.x.w a1, fa0
+; RV64IA-WMO-NEXT: sw a1, 0(a0)
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_store_f32_release:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fmv.x.w a1, fa0
+; RV64IA-TSO-NEXT: sw a1, 0(a0)
+; RV64IA-TSO-NEXT: ret
+;
+; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f32_release:
+; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV32IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f32_release:
+; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV32IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f32_release:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f32_release:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ store atomic float %b, ptr %a release, align 4
+ ret void
+}
+
+define void @atomic_store_f32_seq_cst(ptr %a, float %b) nounwind {
+; RV32I-LABEL: atomic_store_f32_seq_cst:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a2, 5
+; RV32I-NEXT: fmv.x.w a1, fa0
+; RV32I-NEXT: call __atomic_store_4
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-WMO-LABEL: atomic_store_f32_seq_cst:
+; RV32IA-WMO: # %bb.0:
+; RV32IA-WMO-NEXT: fence rw, w
+; RV32IA-WMO-NEXT: fmv.x.w a1, fa0
+; RV32IA-WMO-NEXT: sw a1, 0(a0)
+; RV32IA-WMO-NEXT: ret
+;
+; RV32IA-TSO-LABEL: atomic_store_f32_seq_cst:
+; RV32IA-TSO: # %bb.0:
+; RV32IA-TSO-NEXT: fmv.x.w a1, fa0
+; RV32IA-TSO-NEXT: sw a1, 0(a0)
+; RV32IA-TSO-NEXT: fence rw, rw
+; RV32IA-TSO-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f32_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 5
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_store_f32_seq_cst:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, w
+; RV64IA-WMO-NEXT: fmv.x.w a1, fa0
+; RV64IA-WMO-NEXT: sw a1, 0(a0)
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_store_f32_seq_cst:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fmv.x.w a1, fa0
+; RV64IA-TSO-NEXT: sw a1, 0(a0)
+; RV64IA-TSO-NEXT: fence rw, rw
+; RV64IA-TSO-NEXT: ret
+;
+; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f32_seq_cst:
+; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV32IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f32_seq_cst:
+; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV32IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f32_seq_cst:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f32_seq_cst:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.x.w a1, fa0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ store atomic float %b, ptr %a seq_cst, align 4
+ ret void
+}
+
+define void @atomic_store_f64_unordered(ptr %a, double %b) nounwind {
+; RV32I-LABEL: atomic_store_f64_unordered:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fsd fa0, 0(sp)
+; RV32I-NEXT: lw a1, 0(sp)
+; RV32I-NEXT: lw a2, 4(sp)
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __atomic_store_8
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f64_unordered:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: fsd fa0, 0(sp)
+; RV32IA-NEXT: lw a1, 0(sp)
+; RV32IA-NEXT: lw a2, 4(sp)
+; RV32IA-NEXT: li a3, 0
+; RV32IA-NEXT: call __atomic_store_8
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f64_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 0
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_store_f64_unordered:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: fmv.x.d a1, fa0
+; RV64IA-NEXT: sd a1, 0(a0)
+; RV64IA-NEXT: ret
+ store atomic double %b, ptr %a unordered, align 8
+ ret void
+}
+
+define void @atomic_store_f64_monotonic(ptr %a, double %b) nounwind {
+; RV32I-LABEL: atomic_store_f64_monotonic:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fsd fa0, 0(sp)
+; RV32I-NEXT: lw a1, 0(sp)
+; RV32I-NEXT: lw a2, 4(sp)
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __atomic_store_8
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f64_monotonic:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: fsd fa0, 0(sp)
+; RV32IA-NEXT: lw a1, 0(sp)
+; RV32IA-NEXT: lw a2, 4(sp)
+; RV32IA-NEXT: li a3, 0
+; RV32IA-NEXT: call __atomic_store_8
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f64_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 0
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomic_store_f64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: fmv.x.d a1, fa0
+; RV64IA-NEXT: sd a1, 0(a0)
+; RV64IA-NEXT: ret
+ store atomic double %b, ptr %a monotonic, align 8
+ ret void
+}
+
+define void @atomic_store_f64_release(ptr %a, double %b) nounwind {
+; RV32I-LABEL: atomic_store_f64_release:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fsd fa0, 0(sp)
+; RV32I-NEXT: lw a1, 0(sp)
+; RV32I-NEXT: lw a2, 4(sp)
+; RV32I-NEXT: li a3, 3
+; RV32I-NEXT: call __atomic_store_8
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f64_release:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: fsd fa0, 0(sp)
+; RV32IA-NEXT: lw a1, 0(sp)
+; RV32IA-NEXT: lw a2, 4(sp)
+; RV32IA-NEXT: li a3, 3
+; RV32IA-NEXT: call __atomic_store_8
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f64_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 3
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_store_f64_release:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, w
+; RV64IA-WMO-NEXT: fmv.x.d a1, fa0
+; RV64IA-WMO-NEXT: sd a1, 0(a0)
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_store_f64_release:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fmv.x.d a1, fa0
+; RV64IA-TSO-NEXT: sd a1, 0(a0)
+; RV64IA-TSO-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f64_release:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.x.d a1, fa0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f64_release:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.x.d a1, fa0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ store atomic double %b, ptr %a release, align 8
+ ret void
+}
+
+define void @atomic_store_f64_seq_cst(ptr %a, double %b) nounwind {
+; RV32I-LABEL: atomic_store_f64_seq_cst:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: fsd fa0, 0(sp)
+; RV32I-NEXT: lw a1, 0(sp)
+; RV32I-NEXT: lw a2, 4(sp)
+; RV32I-NEXT: li a3, 5
+; RV32I-NEXT: call __atomic_store_8
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IA-LABEL: atomic_store_f64_seq_cst:
+; RV32IA: # %bb.0:
+; RV32IA-NEXT: addi sp, sp, -16
+; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IA-NEXT: fsd fa0, 0(sp)
+; RV32IA-NEXT: lw a1, 0(sp)
+; RV32IA-NEXT: lw a2, 4(sp)
+; RV32IA-NEXT: li a3, 5
+; RV32IA-NEXT: call __atomic_store_8
+; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IA-NEXT: addi sp, sp, 16
+; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_f64_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: li a2, 5
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IA-WMO-LABEL: atomic_store_f64_seq_cst:
+; RV64IA-WMO: # %bb.0:
+; RV64IA-WMO-NEXT: fence rw, w
+; RV64IA-WMO-NEXT: fmv.x.d a1, fa0
+; RV64IA-WMO-NEXT: sd a1, 0(a0)
+; RV64IA-WMO-NEXT: ret
+;
+; RV64IA-TSO-LABEL: atomic_store_f64_seq_cst:
+; RV64IA-TSO: # %bb.0:
+; RV64IA-TSO-NEXT: fmv.x.d a1, fa0
+; RV64IA-TSO-NEXT: sd a1, 0(a0)
+; RV64IA-TSO-NEXT: fence rw, rw
+; RV64IA-TSO-NEXT: ret
+;
+; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_f64_seq_cst:
+; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fmv.x.d a1, fa0
+; RV64IA-WMO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_f64_seq_cst:
+; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fmv.x.d a1, fa0
+; RV64IA-TSO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+ store atomic double %b, ptr %a seq_cst, align 8
+ ret void
+}
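
On the `+a` targets these all lower to a plain load/store with fences chosen by the ordering, plus an `fmv` to move between the integer and FP register files. The WMO fence placement visible in the checks above, written out as data; the TSO variants drop everything except a leading `fence rw, rw` on seq_cst loads and a trailing one on seq_cst stores:

    # (fence before, fence after) around the plain lw/ld or sw/sd, per the
    # RV32IA-WMO/RV64IA-WMO check lines above
    WMO_LOAD_FENCES = {
        "unordered": (None, None),
        "monotonic": (None, None),
        "acquire":   (None, "fence r, rw"),
        "seq_cst":   ("fence rw, rw", "fence r, rw"),
    }
    WMO_STORE_FENCES = {
        "unordered": (None, None),
        "monotonic": (None, None),
        "release":   ("fence rw, w", None),
        "seq_cst":   ("fence rw, w", None),  # trailing-fence mode adds "fence rw, rw" after
    }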
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
index 1d5d918..5d3fed4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
@@ -23,6 +23,15 @@
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+a,+ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO-TRAILING-FENCE %s
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+a,+experimental-zalasr -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZALASR,RV32IA-ZALASR-WMO %s
+; RUN: llc -mtriple=riscv32 -global-isel -mattr=+a,+experimental-zalasr,+ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZALASR,RV32IA-ZALASR-TSO %s
+
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+a,+experimental-zalasr -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZALASR,RV64IA-ZALASR-WMO %s
+; RUN: llc -mtriple=riscv64 -global-isel -mattr=+a,+experimental-zalasr,+ztso -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZALASR,RV64IA-ZALASR-TSO %s
define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
@@ -156,6 +165,26 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_load_i8_acquire:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: lb.aq a0, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_load_i8_acquire:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: lbu a0, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_load_i8_acquire:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: lb.aq a0, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_load_i8_acquire:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: lbu a0, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
%1 = load atomic i8, ptr %a acquire, align 1
ret i8 %1
}
@@ -232,6 +261,16 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_load_i8_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: lb.aq a0, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_load_i8_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: lb.aq a0, (a0)
+; RV64IA-ZALASR-NEXT: ret
%1 = load atomic i8, ptr %a seq_cst, align 1
ret i8 %1
}
@@ -368,6 +407,26 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: lh a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_load_i16_acquire:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: lh.aq a0, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_load_i16_acquire:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: lh a0, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_load_i16_acquire:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: lh.aq a0, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_load_i16_acquire:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: lh a0, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
%1 = load atomic i16, ptr %a acquire, align 2
ret i16 %1
}
@@ -444,6 +503,16 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: lh a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_load_i16_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: lh.aq a0, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_load_i16_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: lh.aq a0, (a0)
+; RV64IA-ZALASR-NEXT: ret
%1 = load atomic i16, ptr %a seq_cst, align 2
ret i16 %1
}
@@ -580,6 +649,26 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_load_i32_acquire:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: lw.aq a0, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_load_i32_acquire:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: lw a0, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_load_i32_acquire:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: lw.aq a0, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_load_i32_acquire:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: lw a0, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
%1 = load atomic i32, ptr %a acquire, align 4
ret i32 %1
}
@@ -656,6 +745,16 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: lw a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_load_i32_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: lw.aq a0, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_load_i32_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: lw.aq a0, (a0)
+; RV64IA-ZALASR-NEXT: ret
%1 = load atomic i32, ptr %a seq_cst, align 4
ret i32 %1
}
@@ -790,6 +889,16 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_load_i64_acquire:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: ld.aq a0, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_load_i64_acquire:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: ld a0, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
%1 = load atomic i64, ptr %a acquire, align 8
ret i64 %1
}
@@ -850,6 +959,11 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: ld a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_load_i64_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: ld.aq a0, (a0)
+; RV64IA-ZALASR-NEXT: ret
%1 = load atomic i64, ptr %a seq_cst, align 8
ret i64 %1
}
@@ -986,6 +1100,26 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: sb a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_store_i8_release:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: sb.rl a1, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_store_i8_release:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: sb a1, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_store_i8_release:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: sb.rl a1, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_store_i8_release:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: sb a1, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
store atomic i8 %b, ptr %a release, align 1
ret void
}
@@ -1060,6 +1194,16 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: sb a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_store_i8_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: sb.rl a1, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_store_i8_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: sb.rl a1, (a0)
+; RV64IA-ZALASR-NEXT: ret
store atomic i8 %b, ptr %a seq_cst, align 1
ret void
}
@@ -1196,6 +1340,26 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: sh a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_store_i16_release:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: sh.rl a1, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_store_i16_release:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: sh a1, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_store_i16_release:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: sh.rl a1, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_store_i16_release:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: sh a1, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
store atomic i16 %b, ptr %a release, align 2
ret void
}
@@ -1270,6 +1434,16 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: sh a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_store_i16_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: sh.rl a1, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_store_i16_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: sh.rl a1, (a0)
+; RV64IA-ZALASR-NEXT: ret
store atomic i16 %b, ptr %a seq_cst, align 2
ret void
}
@@ -1406,6 +1580,26 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-WMO-LABEL: atomic_store_i32_release:
+; RV32IA-ZALASR-WMO: # %bb.0:
+; RV32IA-ZALASR-WMO-NEXT: sw.rl a1, (a0)
+; RV32IA-ZALASR-WMO-NEXT: ret
+;
+; RV32IA-ZALASR-TSO-LABEL: atomic_store_i32_release:
+; RV32IA-ZALASR-TSO: # %bb.0:
+; RV32IA-ZALASR-TSO-NEXT: sw a1, 0(a0)
+; RV32IA-ZALASR-TSO-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_store_i32_release:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: sw.rl a1, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_store_i32_release:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: sw a1, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
store atomic i32 %b, ptr %a release, align 4
ret void
}
@@ -1480,6 +1674,16 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV32IA-ZALASR-LABEL: atomic_store_i32_seq_cst:
+; RV32IA-ZALASR: # %bb.0:
+; RV32IA-ZALASR-NEXT: sw.rl a1, (a0)
+; RV32IA-ZALASR-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_store_i32_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: sw.rl a1, (a0)
+; RV64IA-ZALASR-NEXT: ret
store atomic i32 %b, ptr %a seq_cst, align 4
ret void
}
@@ -1614,6 +1818,16 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-ZALASR-WMO-LABEL: atomic_store_i64_release:
+; RV64IA-ZALASR-WMO: # %bb.0:
+; RV64IA-ZALASR-WMO-NEXT: sd.rl a1, (a0)
+; RV64IA-ZALASR-WMO-NEXT: ret
+;
+; RV64IA-ZALASR-TSO-LABEL: atomic_store_i64_release:
+; RV64IA-ZALASR-TSO: # %bb.0:
+; RV64IA-ZALASR-TSO-NEXT: sd a1, 0(a0)
+; RV64IA-ZALASR-TSO-NEXT: ret
store atomic i64 %b, ptr %a release, align 8
ret void
}
@@ -1673,6 +1887,11 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64IA-TSO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
+;
+; RV64IA-ZALASR-LABEL: atomic_store_i64_seq_cst:
+; RV64IA-ZALASR: # %bb.0:
+; RV64IA-ZALASR-NEXT: sd.rl a1, (a0)
+; RV64IA-ZALASR-NEXT: ret
store atomic i64 %b, ptr %a seq_cst, align 8
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv32.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv32.ll
new file mode 100644
index 0000000..85a5d9a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv32.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -global-isel-abort=2 \
+; RUN: -pass-remarks-missed='gisel*' -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \
+; RUN: %s -o %t.out 2> %t.err
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
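+;
+; With -global-isel-abort=2, llc falls back to SelectionDAG instead of
+; aborting and emits a remark for each fallback, so stdout carries the final
+; assembly and stderr the remarks checked below.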
+
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t
+define target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl) {
+entry:
+ %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) poison, ptr %base, i32 %vl, i32 3)
+ ret target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0
+}
+
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t
+define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %vl) {
+entry:
+ tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %vl, i32 3)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv64.ll
new file mode 100644
index 0000000..b5405d3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/fallback-rv64.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -global-isel -global-isel-abort=2 \
+; RUN: -pass-remarks-missed='gisel*' -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \
+; RUN: %s -o %t.out 2> %t.err
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
+; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
+
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t
+define target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @test_vlseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl) {
+entry:
+ %0 = tail call target("riscv.vector.tuple", <vscale x 1 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) poison, ptr %base, i64 %vl, i64 3)
+ ret target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %0
+}
+
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t
+define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl) {
+entry:
+ tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index e761d3a..33b89a4 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -39,119 +39,118 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: slli a2, a2, 1
; NOREMAT-NEXT: sub sp, sp, a2
; NOREMAT-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xf0, 0x05, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 752 + 2 * vlenb
-; NOREMAT-NEXT: mv a7, a0
-; NOREMAT-NEXT: li a0, 32
-; NOREMAT-NEXT: addi a5, a7, 512
-; NOREMAT-NEXT: addi a4, a7, 1024
-; NOREMAT-NEXT: addi a6, a7, 1536
-; NOREMAT-NEXT: li t4, 1
-; NOREMAT-NEXT: li a2, 5
+; NOREMAT-NEXT: li a7, 32
+; NOREMAT-NEXT: addi s10, a0, 512
+; NOREMAT-NEXT: addi a4, a0, 1024
+; NOREMAT-NEXT: addi a6, a0, 1536
+; NOREMAT-NEXT: li t0, 1
+; NOREMAT-NEXT: li a3, 5
; NOREMAT-NEXT: li t1, 3
-; NOREMAT-NEXT: li t0, 7
-; NOREMAT-NEXT: lui t5, 1
+; NOREMAT-NEXT: li a2, 7
+; NOREMAT-NEXT: lui t2, 1
; NOREMAT-NEXT: li s4, 9
; NOREMAT-NEXT: li s6, 11
; NOREMAT-NEXT: li s9, 13
; NOREMAT-NEXT: li ra, 15
-; NOREMAT-NEXT: lui t2, 2
+; NOREMAT-NEXT: lui a5, 2
; NOREMAT-NEXT: lui s1, 3
; NOREMAT-NEXT: lui t3, 4
; NOREMAT-NEXT: lui s0, 5
; NOREMAT-NEXT: lui s3, 6
; NOREMAT-NEXT: lui s7, 7
-; NOREMAT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOREMAT-NEXT: slli t4, t4, 11
-; NOREMAT-NEXT: sd t4, 512(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: slli a3, a2, 9
-; NOREMAT-NEXT: sd a3, 504(sp) # 8-byte Folded Spill
+; NOREMAT-NEXT: vsetvli zero, a7, e32, m2, ta, ma
+; NOREMAT-NEXT: slli t0, t0, 11
+; NOREMAT-NEXT: sd t0, 512(sp) # 8-byte Folded Spill
+; NOREMAT-NEXT: slli t4, a3, 9
+; NOREMAT-NEXT: sd t4, 504(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: slli t6, t1, 10
-; NOREMAT-NEXT: slli s2, t0, 9
-; NOREMAT-NEXT: add a0, a7, t5
+; NOREMAT-NEXT: slli s2, a2, 9
+; NOREMAT-NEXT: add a7, a0, t2
; NOREMAT-NEXT: lui s11, 1
; NOREMAT-NEXT: slli s4, s4, 9
-; NOREMAT-NEXT: slli s5, a2, 10
+; NOREMAT-NEXT: slli s5, a3, 10
; NOREMAT-NEXT: slli s6, s6, 9
; NOREMAT-NEXT: slli s8, t1, 11
-; NOREMAT-NEXT: vle32.v v8, (a5)
+; NOREMAT-NEXT: vle32.v v8, (s10)
; NOREMAT-NEXT: slli s9, s9, 9
; NOREMAT-NEXT: li t5, 13
; NOREMAT-NEXT: vle32.v v10, (a4)
; NOREMAT-NEXT: vle32.v v2, (a4)
-; NOREMAT-NEXT: slli s10, t0, 10
+; NOREMAT-NEXT: slli s10, a2, 10
; NOREMAT-NEXT: vle32.v v0, (a6)
; NOREMAT-NEXT: vle32.v v12, (a6)
; NOREMAT-NEXT: slli ra, ra, 9
-; NOREMAT-NEXT: vle32.v v4, (a0)
-; NOREMAT-NEXT: vle32.v v20, (a0)
-; NOREMAT-NEXT: add a4, a7, t2
+; NOREMAT-NEXT: vle32.v v4, (a7)
+; NOREMAT-NEXT: vle32.v v20, (a7)
+; NOREMAT-NEXT: add a4, a0, a5
; NOREMAT-NEXT: vle32.v v6, (a4)
; NOREMAT-NEXT: vle32.v v30, (a4)
-; NOREMAT-NEXT: add a4, a7, s1
+; NOREMAT-NEXT: add a4, a0, s1
; NOREMAT-NEXT: vle32.v v28, (a4)
; NOREMAT-NEXT: vle32.v v26, (a4)
-; NOREMAT-NEXT: add a4, a7, t3
+; NOREMAT-NEXT: add a4, a0, t3
; NOREMAT-NEXT: vle32.v v24, (a4)
; NOREMAT-NEXT: vle32.v v22, (a4)
-; NOREMAT-NEXT: add a4, a7, s0
-; NOREMAT-NEXT: vle32.v v14, (a7)
+; NOREMAT-NEXT: add a4, a0, s0
+; NOREMAT-NEXT: vle32.v v14, (a0)
; NOREMAT-NEXT: vle32.v v18, (a4)
; NOREMAT-NEXT: vle32.v v16, (a4)
-; NOREMAT-NEXT: add a4, a7, s3
+; NOREMAT-NEXT: add a4, a0, s3
; NOREMAT-NEXT: sf.vc.vv 3, 0, v14, v8
; NOREMAT-NEXT: vle32.v v14, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v8, v10
; NOREMAT-NEXT: vle32.v v8, (a4)
-; NOREMAT-NEXT: addi a0, sp, 640
-; NOREMAT-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; NOREMAT-NEXT: add a4, a7, t4
+; NOREMAT-NEXT: addi a4, sp, 640
+; NOREMAT-NEXT: vs2r.v v8, (a4) # vscale x 16-byte Folded Spill
+; NOREMAT-NEXT: add a4, a0, t0
; NOREMAT-NEXT: vle32.v v10, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v0
; NOREMAT-NEXT: vle32.v v2, (a4)
-; NOREMAT-NEXT: add a4, a7, a3
+; NOREMAT-NEXT: add a4, a0, t4
; NOREMAT-NEXT: vle32.v v0, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v10
; NOREMAT-NEXT: vle32.v v10, (a4)
-; NOREMAT-NEXT: add a4, a7, t6
+; NOREMAT-NEXT: add a4, a0, t6
; NOREMAT-NEXT: vle32.v v12, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v0
; NOREMAT-NEXT: vle32.v v2, (a4)
-; NOREMAT-NEXT: add a4, a7, s2
+; NOREMAT-NEXT: add a4, a0, s2
; NOREMAT-NEXT: vle32.v v8, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v10, v12
; NOREMAT-NEXT: vle32.v v12, (a4)
-; NOREMAT-NEXT: add a4, a7, s7
+; NOREMAT-NEXT: add a4, a0, s7
; NOREMAT-NEXT: vle32.v v0, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v8
; NOREMAT-NEXT: vle32.v v10, (a4)
-; NOREMAT-NEXT: add a4, a7, s4
+; NOREMAT-NEXT: add a4, a0, s4
; NOREMAT-NEXT: vle32.v v8, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v4
; NOREMAT-NEXT: vle32.v v12, (a4)
-; NOREMAT-NEXT: add a4, a7, s5
+; NOREMAT-NEXT: add a4, a0, s5
; NOREMAT-NEXT: vle32.v v4, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v20, v8
; NOREMAT-NEXT: vle32.v v8, (a4)
-; NOREMAT-NEXT: add a4, a7, s6
+; NOREMAT-NEXT: add a4, a0, s6
; NOREMAT-NEXT: vle32.v v20, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v4
; NOREMAT-NEXT: vle32.v v12, (a4)
-; NOREMAT-NEXT: add a4, a7, s8
+; NOREMAT-NEXT: add a4, a0, s8
; NOREMAT-NEXT: vle32.v v4, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v8, v20
; NOREMAT-NEXT: vle32.v v8, (a4)
-; NOREMAT-NEXT: add a4, a7, s9
+; NOREMAT-NEXT: add a4, a0, s9
; NOREMAT-NEXT: vle32.v v20, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v4
; NOREMAT-NEXT: vle32.v v12, (a4)
-; NOREMAT-NEXT: add a4, a7, s10
+; NOREMAT-NEXT: add a4, a0, s10
; NOREMAT-NEXT: vle32.v v4, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v8, v20
; NOREMAT-NEXT: vle32.v v8, (a4)
-; NOREMAT-NEXT: add a4, a7, ra
+; NOREMAT-NEXT: add a4, a0, ra
; NOREMAT-NEXT: vle32.v v2, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v4
; NOREMAT-NEXT: lui t4, 8
-; NOREMAT-NEXT: add a5, a7, t4
+; NOREMAT-NEXT: add a5, a0, t4
; NOREMAT-NEXT: vle32.v v20, (a5)
; NOREMAT-NEXT: vle32.v v12, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v8, v2
@@ -159,14 +158,14 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: slli a4, a4, 9
; NOREMAT-NEXT: li s1, 17
; NOREMAT-NEXT: sd a4, 624(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a4, a7, a4
+; NOREMAT-NEXT: add a4, a0, a4
; NOREMAT-NEXT: vle32.v v8, (a4)
; NOREMAT-NEXT: vle32.v v4, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v6
; NOREMAT-NEXT: li a5, 9
; NOREMAT-NEXT: slli a4, a5, 10
; NOREMAT-NEXT: sd a4, 616(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a4, a7, a4
+; NOREMAT-NEXT: add a4, a0, a4
; NOREMAT-NEXT: vle32.v v12, (a4)
; NOREMAT-NEXT: vle32.v v6, (a4)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v8
@@ -174,256 +173,257 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: slli a4, a4, 9
; NOREMAT-NEXT: li t2, 19
; NOREMAT-NEXT: sd a4, 608(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a4, a7, a4
+; NOREMAT-NEXT: add a4, a0, a4
; NOREMAT-NEXT: vle32.v v8, (a4)
; NOREMAT-NEXT: vle32.v v30, (a4)
-; NOREMAT-NEXT: slli a3, a2, 11
+; NOREMAT-NEXT: slli a3, a3, 11
; NOREMAT-NEXT: sd a3, 600(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v12
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v12, (a3)
; NOREMAT-NEXT: vle32.v v4, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v8
; NOREMAT-NEXT: li s7, 21
; NOREMAT-NEXT: slli a3, s7, 9
; NOREMAT-NEXT: sd a3, 592(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v8, (a3)
; NOREMAT-NEXT: vle32.v v6, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v12
; NOREMAT-NEXT: li a6, 11
; NOREMAT-NEXT: slli a3, a6, 10
; NOREMAT-NEXT: sd a3, 584(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v12, (a3)
; NOREMAT-NEXT: vle32.v v30, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v8
; NOREMAT-NEXT: li s3, 23
; NOREMAT-NEXT: slli a3, s3, 9
; NOREMAT-NEXT: sd a3, 576(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v8, (a3)
; NOREMAT-NEXT: vle32.v v4, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v12
; NOREMAT-NEXT: li s0, 25
; NOREMAT-NEXT: slli a3, s0, 9
; NOREMAT-NEXT: sd a3, 568(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v12, (a3)
; NOREMAT-NEXT: vle32.v v6, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v8
; NOREMAT-NEXT: slli a3, t5, 10
; NOREMAT-NEXT: sd a3, 560(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v8, (a3)
; NOREMAT-NEXT: vle32.v v30, (a3)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v28
; NOREMAT-NEXT: li t3, 27
; NOREMAT-NEXT: slli a3, t3, 9
; NOREMAT-NEXT: sd a3, 552(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a3, a7, a3
+; NOREMAT-NEXT: add a3, a0, a3
; NOREMAT-NEXT: vle32.v v28, (a3)
; NOREMAT-NEXT: vle32.v v4, (a3)
-; NOREMAT-NEXT: slli a2, t0, 11
+; NOREMAT-NEXT: slli a2, a2, 11
; NOREMAT-NEXT: sd a2, 544(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v12
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v8
; NOREMAT-NEXT: li t0, 29
; NOREMAT-NEXT: slli a2, t0, 9
; NOREMAT-NEXT: sd a2, 536(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v28
-; NOREMAT-NEXT: li a3, 15
-; NOREMAT-NEXT: slli a2, a3, 10
+; NOREMAT-NEXT: li a7, 15
+; NOREMAT-NEXT: slli a2, a7, 10
; NOREMAT-NEXT: sd a2, 528(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v28, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v12
; NOREMAT-NEXT: li t1, 31
; NOREMAT-NEXT: slli a2, t1, 9
; NOREMAT-NEXT: sd a2, 520(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: vle32.v v4, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v8
-; NOREMAT-NEXT: lui a4, 4
-; NOREMAT-NEXT: addi a0, a4, 512
-; NOREMAT-NEXT: sd a0, 496(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a0, a7, a0
-; NOREMAT-NEXT: vle32.v v8, (a0)
-; NOREMAT-NEXT: vle32.v v26, (a0)
+; NOREMAT-NEXT: lui a3, 4
+; NOREMAT-NEXT: addi a2, a3, 512
+; NOREMAT-NEXT: sd a2, 496(sp) # 8-byte Folded Spill
+; NOREMAT-NEXT: add a2, a0, a2
+; NOREMAT-NEXT: vle32.v v8, (a2)
+; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v28
; NOREMAT-NEXT: slli a2, s1, 10
; NOREMAT-NEXT: sd a2, 488(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v28, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v12
-; NOREMAT-NEXT: addi a2, a4, 1536
+; NOREMAT-NEXT: addi a2, a3, 1536
; NOREMAT-NEXT: sd a2, 480(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: lui a4, 4
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
; NOREMAT-NEXT: slli a2, a5, 11
; NOREMAT-NEXT: sd a2, 472(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v24
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v24, (a2)
; NOREMAT-NEXT: vle32.v v4, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v8
; NOREMAT-NEXT: lui a5, 5
; NOREMAT-NEXT: addi a2, a5, -1536
; NOREMAT-NEXT: sd a2, 464(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v22, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v28
; NOREMAT-NEXT: slli a2, t2, 10
; NOREMAT-NEXT: sd a2, 456(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: li t2, 19
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: li a3, 19
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: vle32.v v28, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v12
; NOREMAT-NEXT: addi a2, a5, -512
; NOREMAT-NEXT: sd a2, 448(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v24
; NOREMAT-NEXT: addi a2, a5, 512
; NOREMAT-NEXT: sd a2, 440(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v24, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
; NOREMAT-NEXT: slli a2, s7, 10
; NOREMAT-NEXT: sd a2, 432(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v8
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v4, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v22, v26
; NOREMAT-NEXT: addi a2, a5, 1536
; NOREMAT-NEXT: sd a2, 424(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v22, (a2)
; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: slli a2, a6, 11
; NOREMAT-NEXT: sd a2, 416(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v28, v12
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: vle32.v v28, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v18
; NOREMAT-NEXT: lui a6, 6
; NOREMAT-NEXT: addi a2, a6, -1536
; NOREMAT-NEXT: sd a2, 408(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v18, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: slli a2, s3, 10
; NOREMAT-NEXT: sd a2, 400(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v16, v24
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v16, (a2)
; NOREMAT-NEXT: vle32.v v24, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v8
; NOREMAT-NEXT: addi a2, a6, -512
; NOREMAT-NEXT: sd a2, 392(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v22
; NOREMAT-NEXT: addi a2, a6, 512
; NOREMAT-NEXT: sd a2, 384(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v22, (a2)
; NOREMAT-NEXT: vle32.v v4, (a2)
; NOREMAT-NEXT: slli a2, s0, 10
; NOREMAT-NEXT: sd a2, 376(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v26, v12
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: vle32.v v2, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v28, v18
; NOREMAT-NEXT: addi a2, a6, 1536
; NOREMAT-NEXT: sd a2, 368(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v18, (a2)
; NOREMAT-NEXT: vle32.v v28, (a2)
; NOREMAT-NEXT: slli a2, t5, 11
; NOREMAT-NEXT: sd a2, 360(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v16
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v16, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v24, v8
; NOREMAT-NEXT: lui s0, 7
; NOREMAT-NEXT: addi a2, s0, -1536
; NOREMAT-NEXT: sd a2, 352(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v24, (a2)
; NOREMAT-NEXT: slli a2, t3, 10
; NOREMAT-NEXT: sd a2, 344(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v14
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v14, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
-; NOREMAT-NEXT: addi a0, sp, 640
-; NOREMAT-NEXT: vl2r.v v12, (a0) # vscale x 16-byte Folded Reload
+; NOREMAT-NEXT: addi a2, sp, 640
+; NOREMAT-NEXT: vl2r.v v12, (a2) # vscale x 16-byte Folded Reload
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v22
; NOREMAT-NEXT: addi a2, s0, -512
; NOREMAT-NEXT: sd a2, 336(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v22, (a2)
; NOREMAT-NEXT: vle32.v v12, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v4, v26
; NOREMAT-NEXT: addi a2, s0, 512
; NOREMAT-NEXT: sd a2, 328(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: lui t3, 7
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v26, (a2)
; NOREMAT-NEXT: vle32.v v4, (a2)
; NOREMAT-NEXT: slli a2, t0, 10
; NOREMAT-NEXT: sd a2, 320(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v2, v18
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v18, (a2)
; NOREMAT-NEXT: vle32.v v2, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v28, v16
; NOREMAT-NEXT: addi a2, t3, 1536
; NOREMAT-NEXT: sd a2, 312(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v16, (a2)
; NOREMAT-NEXT: vle32.v v28, (a2)
-; NOREMAT-NEXT: slli a2, a3, 11
+; NOREMAT-NEXT: slli a2, a7, 11
; NOREMAT-NEXT: sd a2, 304(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v6, v8
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v8, (a2)
; NOREMAT-NEXT: vle32.v v6, (a2)
; NOREMAT-NEXT: sf.vc.vv 3, 0, v24, v14
; NOREMAT-NEXT: addi a2, t4, -1536
; NOREMAT-NEXT: sd a2, 296(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v14, (a2)
; NOREMAT-NEXT: vle32.v v24, (a2)
; NOREMAT-NEXT: slli a2, t1, 10
; NOREMAT-NEXT: sd a2, 288(sp) # 8-byte Folded Spill
; NOREMAT-NEXT: sf.vc.vv 3, 0, v30, v22
-; NOREMAT-NEXT: add a2, a7, a2
+; NOREMAT-NEXT: add a2, a0, a2
; NOREMAT-NEXT: vle32.v v22, (a2)
; NOREMAT-NEXT: vle32.v v30, (a2)
-; NOREMAT-NEXT: addi a0, t4, -512
-; NOREMAT-NEXT: sd a0, 280(sp) # 8-byte Folded Spill
-; NOREMAT-NEXT: add a0, a7, a0
+; NOREMAT-NEXT: addi a2, t4, -512
+; NOREMAT-NEXT: sd a2, 280(sp) # 8-byte Folded Spill
+; NOREMAT-NEXT: add a0, a0, a2
; NOREMAT-NEXT: sf.vc.vv 3, 0, v12, v0
; NOREMAT-NEXT: vle32.v v12, (a0)
; NOREMAT-NEXT: vle32.v v0, (a0)
@@ -476,7 +476,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: addi s11, a0, 512
; NOREMAT-NEXT: addi s7, a0, 1024
; NOREMAT-NEXT: addi s3, a0, 1536
-; NOREMAT-NEXT: slli s1, t2, 11
+; NOREMAT-NEXT: slli s1, a3, 11
; NOREMAT-NEXT: lui a0, 10
; NOREMAT-NEXT: addi t2, a0, -1536
; NOREMAT-NEXT: addi a7, a0, -1024
diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
index 34d4657..c68fa59 100644
--- a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
@@ -78,3 +78,10 @@ define i64 @test0(i64 %n, ptr %p) nounwind {
ret i64 %ret
}
+
+; Check that the .note.GNU-stack section is explicitly emitted (ELF only) when
+; trampolines are present: the trampoline is materialized on the stack, so the
+; stack must be marked executable, hence the "x" flag.
+; UTC_ARGS: --disable
+; RV64-LINUX: .section ".note.GNU-stack","x",@progbits
+; RV64: .section ".note.GNU-stack","x",@progbits
+; UTC_ARGS: --enable
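+
+; For reference, a minimal sketch (illustrative names, not taken from this
+; test) of the trampoline pattern that triggers the executable-stack note:
+;
+;   declare void @llvm.init.trampoline(ptr, ptr, ptr)
+;   declare ptr @llvm.adjust.trampoline(ptr)
+;
+;   define i64 @callee(ptr nest %env, i64 %n) {
+;     ret i64 %n
+;   }
+;
+;   define i64 @caller(i64 %n, ptr %env) {
+;     %tramp = alloca [32 x i8], align 8
+;     call void @llvm.init.trampoline(ptr %tramp, ptr @callee, ptr %env)
+;     %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
+;     %r = call i64 %fp(i64 %n)
+;     ret i64 %r
+;   }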
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll
new file mode 100644
index 0000000..b178a56
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll
@@ -0,0 +1,65 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-vulkan-library %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
+
+; ModuleID = 'test_counters.hlsl'
+source_filename = "test_counters.hlsl"
+
+; CHECK: OpCapability Int8
+; CHECK-DAG: OpName [[OutputBuffer:%[0-9]+]] "OutputBuffer"
+; CHECK-DAG: OpName [[InputBuffer:%[0-9]+]] "InputBuffer"
+; CHECK-DAG: OpName [[OutputBufferCounter:%[0-9]+]] "OutputBuffer.counter"
+; CHECK-DAG: OpName [[InputBufferCounter:%[0-9]+]] "InputBuffer.counter"
+; CHECK-DAG: OpDecorate [[OutputBuffer]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[OutputBuffer]] Binding 10
+; CHECK-DAG: OpDecorate [[OutputBufferCounter]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[OutputBufferCounter]] Binding 0
+; CHECK-DAG: OpDecorate [[InputBuffer]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[InputBuffer]] Binding 1
+; CHECK-DAG: OpDecorate [[InputBufferCounter]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[InputBufferCounter]] Binding 2
+; CHECK-DAG: [[int:%[0-9]+]] = OpTypeInt 32 0
+; CHECK-DAG: [[zero:%[0-9]+]] = OpConstant [[int]] 0{{$}}
+; CHECK-DAG: [[one:%[0-9]+]] = OpConstant [[int]] 1{{$}}
+; CHECK-DAG: [[minus_one:%[0-9]+]] = OpConstant [[int]] 4294967295
+; CHECK: [[OutputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[OutputBuffer]]
+; CHECK: [[InputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[InputBuffer]]
+; CHECK: [[InputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferCounter]] [[zero]]
+; CHECK: [[dec:%[0-9]+]] = OpAtomicIAdd [[int]] [[InputCounterAC]] [[one]] [[zero]] [[minus_one]]
+; CHECK: [[iadd:%[0-9]+]] = OpIAdd [[int]] [[dec]] [[minus_one]]
+; CHECK: [[OutputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferCounter]] [[zero]]
+; CHECK: [[inc:%[0-9]+]] = OpAtomicIAdd [[int]] [[OutputCounterAC]] [[one]] [[zero]] [[one]]
+; CHECK: [[InputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferHandle]] [[zero]] [[iadd]]
+; CHECK: [[load:%[0-9]+]] = OpLoad {{%[0-9]+}} [[InputAC]]
+; CHECK: [[OutputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferHandle]] [[zero]] [[inc]]
+; CHECK: OpStore [[OutputAC]] [[load]]
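+;
+; Note: 4294967295 is the i32 bit pattern of -1. The counter decrement is an
+; atomic add of -1 whose result (the pre-decrement value) is adjusted by a
+; further -1 to form the read index, while the increment uses the atomic add's
+; result (the pre-increment value) directly as the write index.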
+
+
+target triple = "spirv1.6-unknown-vulkan1.3-compute"
+
+@.str = private unnamed_addr constant [13 x i8] c"OutputBuffer\00"
+@.str.2 = private unnamed_addr constant [12 x i8] c"InputBuffer\00"
+
+define void @main() #0 {
+entry:
+ %0 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 0, i32 10, i32 1, i32 0, ptr @.str)
+ %1 = call target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 0, i32 0)
+ %2 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 1, i32 0, i32 1, i32 0, ptr @.str.2)
+ %3 = call target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 2, i32 0)
+ %4 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %3, i8 -1)
+ %5 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %1, i8 1)
+ %6 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 %4)
+ %7 = load float, ptr addrspace(11) %6
+ %8 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 %5)
+ store float %7, ptr addrspace(11) %8
+ ret void
+}
+
+declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1
+declare target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32, i32) #1
+declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1
+declare i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1), i8) #2
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32) #1
+
+attributes #0 = { "hlsl.shader"="compute" "hlsl.numthreads"="1,1,1" }
+attributes #1 = { memory(none) }
+attributes #2 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/CodeGen/SystemZ/llvm.sincos.ll b/llvm/test/CodeGen/SystemZ/llvm.sincos.ll
index 9798077..e3ed31f 100644
--- a/llvm/test/CodeGen/SystemZ/llvm.sincos.ll
+++ b/llvm/test/CodeGen/SystemZ/llvm.sincos.ll
@@ -163,9 +163,9 @@ define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 {
; LINUX-NEXT: ld %f10, 8(%r3)
; LINUX-NEXT: ld %f0, 16(%r3)
; LINUX-NEXT: ld %f2, 24(%r3)
-; LINUX-NEXT: la %r3, 16(%r2)
-; LINUX-NEXT: la %r4, 48(%r2)
; LINUX-NEXT: la %r2, 176(%r15)
+; LINUX-NEXT: la %r3, 16(%r13)
+; LINUX-NEXT: la %r4, 48(%r13)
; LINUX-NEXT: std %f0, 176(%r15)
; LINUX-NEXT: std %f2, 184(%r15)
; LINUX-NEXT: brasl %r14, sincosl@PLT
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
index 6f986ce..c418038 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
@@ -541,11 +541,11 @@ define dso_local arm_aapcs_vfpcc void @two_reductions_mul_add_v8i16(ptr nocaptur
; CHECK-NEXT: cbz r2, .LBB7_4
; CHECK-NEXT: @ %bb.1: @ %vector.ph
; CHECK-NEXT: adds r3, r2, #7
-; CHECK-NEXT: vmov.i32 q1, #0x0
-; CHECK-NEXT: bic r3, r3, #7
; CHECK-NEXT: movs r4, #1
+; CHECK-NEXT: bic r3, r3, #7
+; CHECK-NEXT: vmov.i32 q1, #0x0
; CHECK-NEXT: subs r3, #8
-; CHECK-NEXT: vmov q3, q1
+; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: add.w r12, r4, r3, lsr #3
; CHECK-NEXT: mov r3, r0
; CHECK-NEXT: mov r4, r1
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.ll
index 4020709..fe06601 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.ll
@@ -16,39 +16,40 @@ define void @__arm_2d_impl_rgb16_colour_filling_with_alpha(ptr noalias nocapture
; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: ldrsh.w r7, [r2]
; CHECK-NEXT: cmp r7, #1
-; CHECK-NEXT: blt.w .LBB0_6
+; CHECK-NEXT: blt .LBB0_6
; CHECK-NEXT: @ %bb.2: @ %for.cond3.preheader.us.preheader
-; CHECK-NEXT: movs r2, #252
; CHECK-NEXT: ldr r4, [sp, #152]
+; CHECK-NEXT: movs r2, #252
; CHECK-NEXT: and.w r6, r2, r3, lsr #3
; CHECK-NEXT: movs r2, #120
; CHECK-NEXT: and.w r5, r2, r3, lsr #9
; CHECK-NEXT: lsls r3, r3, #3
-; CHECK-NEXT: uxtb r3, r3
; CHECK-NEXT: muls r6, r4, r6
+; CHECK-NEXT: uxtb r3, r3
; CHECK-NEXT: rsb.w r2, r4, #256
-; CHECK-NEXT: vmov.i16 q2, #0xfc
+; CHECK-NEXT: vmov.i16 q1, #0xfc
+; CHECK-NEXT: vdup.16 q0, r6
; CHECK-NEXT: mul lr, r5, r4
-; CHECK-NEXT: vdup.16 q4, r6
; CHECK-NEXT: mov.w r6, #2016
-; CHECK-NEXT: vmov.i16 q6, #0xf8
+; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
; CHECK-NEXT: mul r5, r3, r4
; CHECK-NEXT: adds r3, r7, #7
+; CHECK-NEXT: vdup.16 q0, r6
; CHECK-NEXT: bic r3, r3, #7
-; CHECK-NEXT: vdup.16 q3, lr
+; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vdup.16 q0, r5
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vdup.16 q0, lr
; CHECK-NEXT: subs r3, #8
+; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT: movs r4, #1
-; CHECK-NEXT: vdup.16 q0, r5
-; CHECK-NEXT: lsls r1, r1, #1
+; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: add.w r3, r4, r3, lsr #3
-; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT: vmov.i16 q0, #0xf800
+; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q2, [sp] @ 16-byte Reload
+; CHECK-NEXT: lsls r1, r1, #1
; CHECK-NEXT: movs r4, #0
-; CHECK-NEXT: vdup.16 q5, r6
-; CHECK-NEXT: vmov.i16 q7, #0x78
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
-; CHECK-NEXT: vstrw.32 q2, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT: vstrw.32 q3, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vmov.i16 q4, #0xf8
; CHECK-NEXT: .LBB0_3: @ %vector.ph
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB0_4 Depth 2
@@ -59,37 +60,31 @@ define void @__arm_2d_impl_rgb16_colour_filling_with_alpha(ptr noalias nocapture
; CHECK-NEXT: @ Parent Loop BB0_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: vctp.16 r6
-; CHECK-NEXT: subs r6, #8
+; CHECK-NEXT: vmov.i16 q5, #0xf800
; CHECK-NEXT: vpst
; CHECK-NEXT: vldrht.u16 q0, [r5]
-; CHECK-NEXT: vshr.u16 q1, q0, #3
-; CHECK-NEXT: vand q1, q1, q2
-; CHECK-NEXT: vmov q2, q4
-; CHECK-NEXT: vmla.i16 q2, q1, r2
-; CHECK-NEXT: vshr.u16 q1, q2, #5
-; CHECK-NEXT: vshl.i16 q2, q0, #3
-; CHECK-NEXT: vand q3, q1, q5
-; CHECK-NEXT: vmov q1, q7
-; CHECK-NEXT: vand q2, q2, q6
-; CHECK-NEXT: vmov q7, q6
-; CHECK-NEXT: vmov q6, q5
-; CHECK-NEXT: vmov q5, q4
-; CHECK-NEXT: vldrw.u32 q4, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: subs r6, #8
+; CHECK-NEXT: vshr.u16 q3, q0, #3
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vmla.i16 q1, q3, r2
+; CHECK-NEXT: vshl.i16 q3, q0, #3
+; CHECK-NEXT: vand q3, q3, q4
+; CHECK-NEXT: vmov q4, q6
+; CHECK-NEXT: vshr.u16 q1, q1, #5
+; CHECK-NEXT: vmla.i16 q4, q3, r2
+; CHECK-NEXT: vshr.u16 q3, q4, #11
+; CHECK-NEXT: vand q1, q1, q7
+; CHECK-NEXT: vorr q1, q1, q3
; CHECK-NEXT: vshr.u16 q0, q0, #9
-; CHECK-NEXT: vmla.i16 q4, q2, r2
-; CHECK-NEXT: vshr.u16 q2, q4, #11
-; CHECK-NEXT: vmov q4, q5
-; CHECK-NEXT: vmov q5, q6
-; CHECK-NEXT: vmov q6, q7
-; CHECK-NEXT: vmov q7, q1
-; CHECK-NEXT: vorr q1, q3, q2
-; CHECK-NEXT: vldrw.u32 q2, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vand q0, q0, q7
-; CHECK-NEXT: vmla.i16 q2, q0, r2
-; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
-; CHECK-NEXT: vand q0, q2, q0
-; CHECK-NEXT: vldrw.u32 q2, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vmov.i16 q3, #0x78
+; CHECK-NEXT: vmov.i16 q4, #0xf8
+; CHECK-NEXT: vand q0, q0, q3
+; CHECK-NEXT: vmov q3, q2
+; CHECK-NEXT: vmla.i16 q3, q0, r2
+; CHECK-NEXT: vand q0, q3, q5
; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: vmov.i16 q1, #0xfc
; CHECK-NEXT: vpst
; CHECK-NEXT: vstrht.16 q0, [r5], #16
; CHECK-NEXT: le lr, .LBB0_4
@@ -190,7 +185,7 @@ define void @__arm_2d_impl_rgb16_colour_filling_with_alpha_sched(ptr noalias noc
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: ldrsh.w r12, [r2, #2]
; CHECK-NEXT: cmp.w r12, #1
-; CHECK-NEXT: blt.w .LBB1_7
+; CHECK-NEXT: blt .LBB1_7
; CHECK-NEXT: @ %bb.1: @ %for.cond3.preheader.lr.ph
; CHECK-NEXT: ldrsh.w r2, [r2]
; CHECK-NEXT: cmp r2, #1
@@ -200,71 +195,70 @@ define void @__arm_2d_impl_rgb16_colour_filling_with_alpha_sched(ptr noalias noc
; CHECK-NEXT: push {r4, r5, r6, r7, lr}
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: sub sp, #80
-; CHECK-NEXT: ldr r7, [sp, #168]
+; CHECK-NEXT: ldr r7, [sp, #88]
; CHECK-NEXT: movs r5, #120
; CHECK-NEXT: lsls r6, r3, #3
; CHECK-NEXT: movs r4, #252
; CHECK-NEXT: and.w r5, r5, r3, lsr #9
; CHECK-NEXT: uxtb r6, r6
; CHECK-NEXT: and.w r3, r4, r3, lsr #3
+; CHECK-NEXT: adds r4, r2, #7
; CHECK-NEXT: muls r6, r7, r6
+; CHECK-NEXT: bic r4, r4, #7
; CHECK-NEXT: mul lr, r3, r7
-; CHECK-NEXT: vdup.16 q0, r6
-; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT: vdup.16 q0, lr
; CHECK-NEXT: muls r5, r7, r5
-; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT: vmov.i16 q0, #0xfc
-; CHECK-NEXT: mov.w r6, #2016
-; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT: vdup.16 q0, r5
; CHECK-NEXT: rsb.w r3, r7, #256
; CHECK-NEXT: lsls r7, r1, #1
-; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT: vdup.16 q0, r6
+; CHECK-NEXT: sub.w r1, r4, #8
+; CHECK-NEXT: movs r4, #1
; CHECK-NEXT: vmov.i16 q2, #0xf8
-; CHECK-NEXT: vmov.i16 q5, #0x78
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
-; CHECK-NEXT: vmov.i16 q6, #0xf800
+; CHECK-NEXT: add.w r1, r4, r1, lsr #3
+; CHECK-NEXT: vdup.16 q6, r6
+; CHECK-NEXT: mov.w r6, #2016
; CHECK-NEXT: movs r4, #0
-; CHECK-NEXT: vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT: vdup.16 q3, lr
+; CHECK-NEXT: vdup.16 q5, r5
+; CHECK-NEXT: vdup.16 q7, r6
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: .LBB1_3: @ %vector.ph
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB1_4 Depth 2
; CHECK-NEXT: mov r5, r0
-; CHECK-NEXT: dlstp.16 lr, r2
+; CHECK-NEXT: mov r6, r2
+; CHECK-NEXT: dls lr, r1
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: .LBB1_4: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB1_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: vldrh.u16 q0, [r5]
+; CHECK-NEXT: vctp.16 r6
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vldrht.u16 q0, [r5]
; CHECK-NEXT: vshl.i16 q1, q0, #3
-; CHECK-NEXT: vldrw.u32 q3, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT: subs r6, #8
; CHECK-NEXT: vand q1, q1, q2
-; CHECK-NEXT: vmla.i16 q3, q1, r3
-; CHECK-NEXT: vmov.f64 d8, d4
-; CHECK-NEXT: vmov.f64 d9, d5
-; CHECK-NEXT: vldrw.u32 q1, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT: vshr.u16 q2, q0, #9
+; CHECK-NEXT: vmov.i16 q2, #0x78
+; CHECK-NEXT: vshr.u16 q4, q0, #9
+; CHECK-NEXT: vand q4, q4, q2
+; CHECK-NEXT: vmov q2, q6
+; CHECK-NEXT: vmla.i16 q2, q1, r3
; CHECK-NEXT: vshr.u16 q0, q0, #3
+; CHECK-NEXT: vmov.i16 q1, #0xfc
; CHECK-NEXT: vand q0, q0, q1
-; CHECK-NEXT: vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vmov q1, q3
; CHECK-NEXT: vmla.i16 q1, q0, r3
-; CHECK-NEXT: vand q2, q2, q5
-; CHECK-NEXT: vshr.u16 q0, q3, #11
-; CHECK-NEXT: vldrw.u32 q3, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vshr.u16 q0, q2, #11
+; CHECK-NEXT: vmov q2, q5
+; CHECK-NEXT: vmla.i16 q2, q4, r3
; CHECK-NEXT: vshr.u16 q1, q1, #5
-; CHECK-NEXT: vmla.i16 q3, q2, r3
+; CHECK-NEXT: vmov.i16 q4, #0xf800
; CHECK-NEXT: vand q1, q1, q7
; CHECK-NEXT: vorr q0, q1, q0
-; CHECK-NEXT: vand q1, q3, q6
+; CHECK-NEXT: vand q1, q2, q4
+; CHECK-NEXT: vmov.i16 q2, #0xf8
; CHECK-NEXT: vorr q0, q0, q1
-; CHECK-NEXT: vstrh.16 q0, [r5], #16
-; CHECK-NEXT: vmov.f64 d4, d8
-; CHECK-NEXT: vmov.f64 d5, d9
-; CHECK-NEXT: letp lr, .LBB1_4
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrht.16 q0, [r5], #16
+; CHECK-NEXT: le lr, .LBB1_4
; CHECK-NEXT: @ %bb.5: @ %for.cond3.for.cond.cleanup7_crit_edge.us
; CHECK-NEXT: @ in Loop: Header=BB1_3 Depth=1
; CHECK-NEXT: adds r4, #1
@@ -272,7 +266,6 @@ define void @__arm_2d_impl_rgb16_colour_filling_with_alpha_sched(ptr noalias noc
; CHECK-NEXT: cmp r4, r12
; CHECK-NEXT: bne .LBB1_3
; CHECK-NEXT: @ %bb.6:
-; CHECK-NEXT: add sp, #80
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, lr}
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
index 07c06e1..1769c5d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
@@ -17,17 +17,16 @@
define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input, ptr nocapture %Output, i16 signext %Size, i16 signext %N, i16 signext %Scale) local_unnamed_addr {
; ENABLED-LABEL: varying_outer_2d_reduction:
; ENABLED: @ %bb.0: @ %entry
-; ENABLED-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
-; ENABLED-NEXT: sub sp, #4
; ENABLED-NEXT: cmp r3, #1
-; ENABLED-NEXT: str r0, [sp] @ 4-byte Spill
-; ENABLED-NEXT: blt .LBB0_8
-; ENABLED-NEXT: @ %bb.1: @ %for.body.lr.ph
-; ENABLED-NEXT: ldr r0, [sp, #36]
-; ENABLED-NEXT: add.w r12, r2, #3
-; ENABLED-NEXT: ldr.w r10, [sp] @ 4-byte Reload
-; ENABLED-NEXT: mov.w r8, #0
-; ENABLED-NEXT: mov r9, r12
+; ENABLED-NEXT: it lt
+; ENABLED-NEXT: bxlt lr
+; ENABLED-NEXT: .LBB0_1: @ %for.body.lr.ph
+; ENABLED-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: mov r11, r0
+; ENABLED-NEXT: ldr r0, [sp, #32]
+; ENABLED-NEXT: add.w r9, r2, #3
+; ENABLED-NEXT: mov.w r12, #0
+; ENABLED-NEXT: mov r10, r11
; ENABLED-NEXT: uxth r0, r0
; ENABLED-NEXT: rsbs r5, r0, #0
; ENABLED-NEXT: b .LBB0_4
@@ -37,31 +36,32 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: @ in Loop: Header=BB0_4 Depth=1
; ENABLED-NEXT: lsrs r0, r0, #16
; ENABLED-NEXT: sub.w r9, r9, #1
-; ENABLED-NEXT: strh.w r0, [r1, r8, lsl #1]
-; ENABLED-NEXT: add.w r8, r8, #1
+; ENABLED-NEXT: strh.w r0, [r1, r12, lsl #1]
+; ENABLED-NEXT: add.w r12, r12, #1
; ENABLED-NEXT: add.w r10, r10, #2
-; ENABLED-NEXT: cmp r8, r3
+; ENABLED-NEXT: cmp r12, r3
; ENABLED-NEXT: beq .LBB0_8
; ENABLED-NEXT: .LBB0_4: @ %for.body
; ENABLED-NEXT: @ =>This Loop Header: Depth=1
; ENABLED-NEXT: @ Child Loop BB0_6 Depth 2
-; ENABLED-NEXT: cmp r2, r8
+; ENABLED-NEXT: cmp r2, r12
; ENABLED-NEXT: ble .LBB0_2
; ENABLED-NEXT: @ %bb.5: @ %vector.ph
; ENABLED-NEXT: @ in Loop: Header=BB0_4 Depth=1
; ENABLED-NEXT: bic r0, r9, #3
; ENABLED-NEXT: movs r7, #1
; ENABLED-NEXT: subs r0, #4
-; ENABLED-NEXT: sub.w r4, r2, r8
+; ENABLED-NEXT: sub.w r4, r2, r12
; ENABLED-NEXT: vmov.i32 q1, #0x0
; ENABLED-NEXT: add.w r6, r7, r0, lsr #2
-; ENABLED-NEXT: sub.w r0, r12, r8
+; ENABLED-NEXT: adds r0, r2, #3
+; ENABLED-NEXT: sub.w r0, r0, r12
; ENABLED-NEXT: bic r0, r0, #3
; ENABLED-NEXT: subs r0, #4
; ENABLED-NEXT: add.w r0, r7, r0, lsr #2
; ENABLED-NEXT: mov r7, r10
; ENABLED-NEXT: dls lr, r0
-; ENABLED-NEXT: ldr r0, [sp] @ 4-byte Reload
+; ENABLED-NEXT: mov r0, r11
; ENABLED-NEXT: .LBB0_6: @ %vector.body
; ENABLED-NEXT: @ Parent Loop BB0_4 Depth=1
; ENABLED-NEXT: @ => This Inner Loop Header: Depth=2
@@ -82,23 +82,22 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: vpsel q0, q1, q0
; ENABLED-NEXT: vaddv.u32 r0, q0
; ENABLED-NEXT: b .LBB0_3
-; ENABLED-NEXT: .LBB0_8: @ %for.end17
-; ENABLED-NEXT: add sp, #4
-; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; ENABLED-NEXT: .LBB0_8:
+; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: bx lr
;
; NOREDUCTIONS-LABEL: varying_outer_2d_reduction:
; NOREDUCTIONS: @ %bb.0: @ %entry
-; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
-; NOREDUCTIONS-NEXT: sub sp, #4
; NOREDUCTIONS-NEXT: cmp r3, #1
-; NOREDUCTIONS-NEXT: str r0, [sp] @ 4-byte Spill
-; NOREDUCTIONS-NEXT: blt .LBB0_8
-; NOREDUCTIONS-NEXT: @ %bb.1: @ %for.body.lr.ph
-; NOREDUCTIONS-NEXT: ldr r0, [sp, #36]
-; NOREDUCTIONS-NEXT: add.w r12, r2, #3
-; NOREDUCTIONS-NEXT: ldr.w r10, [sp] @ 4-byte Reload
-; NOREDUCTIONS-NEXT: mov.w r8, #0
-; NOREDUCTIONS-NEXT: mov r9, r12
+; NOREDUCTIONS-NEXT: it lt
+; NOREDUCTIONS-NEXT: bxlt lr
+; NOREDUCTIONS-NEXT: .LBB0_1: @ %for.body.lr.ph
+; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: mov r11, r0
+; NOREDUCTIONS-NEXT: ldr r0, [sp, #32]
+; NOREDUCTIONS-NEXT: add.w r9, r2, #3
+; NOREDUCTIONS-NEXT: mov.w r12, #0
+; NOREDUCTIONS-NEXT: mov r10, r11
; NOREDUCTIONS-NEXT: uxth r0, r0
; NOREDUCTIONS-NEXT: rsbs r5, r0, #0
; NOREDUCTIONS-NEXT: b .LBB0_4
@@ -108,31 +107,32 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: @ in Loop: Header=BB0_4 Depth=1
; NOREDUCTIONS-NEXT: lsrs r0, r0, #16
; NOREDUCTIONS-NEXT: sub.w r9, r9, #1
-; NOREDUCTIONS-NEXT: strh.w r0, [r1, r8, lsl #1]
-; NOREDUCTIONS-NEXT: add.w r8, r8, #1
+; NOREDUCTIONS-NEXT: strh.w r0, [r1, r12, lsl #1]
+; NOREDUCTIONS-NEXT: add.w r12, r12, #1
; NOREDUCTIONS-NEXT: add.w r10, r10, #2
-; NOREDUCTIONS-NEXT: cmp r8, r3
+; NOREDUCTIONS-NEXT: cmp r12, r3
; NOREDUCTIONS-NEXT: beq .LBB0_8
; NOREDUCTIONS-NEXT: .LBB0_4: @ %for.body
; NOREDUCTIONS-NEXT: @ =>This Loop Header: Depth=1
; NOREDUCTIONS-NEXT: @ Child Loop BB0_6 Depth 2
-; NOREDUCTIONS-NEXT: cmp r2, r8
+; NOREDUCTIONS-NEXT: cmp r2, r12
; NOREDUCTIONS-NEXT: ble .LBB0_2
; NOREDUCTIONS-NEXT: @ %bb.5: @ %vector.ph
; NOREDUCTIONS-NEXT: @ in Loop: Header=BB0_4 Depth=1
; NOREDUCTIONS-NEXT: bic r0, r9, #3
; NOREDUCTIONS-NEXT: movs r7, #1
; NOREDUCTIONS-NEXT: subs r0, #4
-; NOREDUCTIONS-NEXT: sub.w r4, r2, r8
+; NOREDUCTIONS-NEXT: sub.w r4, r2, r12
; NOREDUCTIONS-NEXT: vmov.i32 q1, #0x0
; NOREDUCTIONS-NEXT: add.w r6, r7, r0, lsr #2
-; NOREDUCTIONS-NEXT: sub.w r0, r12, r8
+; NOREDUCTIONS-NEXT: adds r0, r2, #3
+; NOREDUCTIONS-NEXT: sub.w r0, r0, r12
; NOREDUCTIONS-NEXT: bic r0, r0, #3
; NOREDUCTIONS-NEXT: subs r0, #4
; NOREDUCTIONS-NEXT: add.w r0, r7, r0, lsr #2
; NOREDUCTIONS-NEXT: mov r7, r10
; NOREDUCTIONS-NEXT: dls lr, r0
-; NOREDUCTIONS-NEXT: ldr r0, [sp] @ 4-byte Reload
+; NOREDUCTIONS-NEXT: mov r0, r11
; NOREDUCTIONS-NEXT: .LBB0_6: @ %vector.body
; NOREDUCTIONS-NEXT: @ Parent Loop BB0_4 Depth=1
; NOREDUCTIONS-NEXT: @ => This Inner Loop Header: Depth=2
@@ -153,9 +153,9 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: vpsel q0, q1, q0
; NOREDUCTIONS-NEXT: vaddv.u32 r0, q0
; NOREDUCTIONS-NEXT: b .LBB0_3
-; NOREDUCTIONS-NEXT: .LBB0_8: @ %for.end17
-; NOREDUCTIONS-NEXT: add sp, #4
-; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; NOREDUCTIONS-NEXT: .LBB0_8:
+; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: bx lr
entry:
%conv = sext i16 %N to i32
%cmp36 = icmp sgt i16 %N, 0
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination.ll
index e0a61b1..78dc35b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination.ll
@@ -49,18 +49,17 @@ define i32 @vcmp_new_vpst_combination(i32 %len, ptr nocapture readonly %arr) {
; CHECK-NEXT: cmp r0, #1
; CHECK-NEXT: blt .LBB1_4
; CHECK-NEXT: @ %bb.1: @ %vector.ph
-; CHECK-NEXT: vmov.i32 q0, #0x0
-; CHECK-NEXT: vmov.i32 q1, #0x1
+; CHECK-NEXT: vmov.i32 q0, #0x1
; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: dlstp.32 lr, r0
; CHECK-NEXT: .LBB1_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q2, [r1], #16
-; CHECK-NEXT: vcmp.i32 ne, q2, zr
-; CHECK-NEXT: vmov q2, q0
+; CHECK-NEXT: vldrw.u32 q1, [r1], #16
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.i32 q1, #0x0
; CHECK-NEXT: vpst
-; CHECK-NEXT: vmovt q2, q1
-; CHECK-NEXT: vaddva.u32 r2, q2
+; CHECK-NEXT: vmovt q1, q0
+; CHECK-NEXT: vaddva.u32 r2, q1
; CHECK-NEXT: letp lr, .LBB1_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: mov r0, r2
diff --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index c8dd949..a904347 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -993,10 +993,10 @@ define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr no
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT: .pad #24
-; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: .pad #20
+; CHECK-NEXT: sub sp, #20
; CHECK-NEXT: cmp r3, #8
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: blo.w .LBB16_12
; CHECK-NEXT: @ %bb.1: @ %if.then
; CHECK-NEXT: lsrs.w r12, r3, #2
@@ -1016,50 +1016,48 @@ define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr no
; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: subs r1, r7, #2
; CHECK-NEXT: rsbs r7, r4, #0
-; CHECK-NEXT: str r7, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: add.w r7, r3, #16
-; CHECK-NEXT: str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: str r4, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: str r7, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: str r0, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: b .LBB16_6
; CHECK-NEXT: .LBB16_3: @ %while.end.loopexit
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
-; CHECK-NEXT: add.w r5, r5, r0, lsl #1
+; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: add.w r6, r6, r0, lsl #1
; CHECK-NEXT: b .LBB16_5
; CHECK-NEXT: .LBB16_4: @ %for.end
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: wls lr, r0, .LBB16_5
; CHECK-NEXT: b .LBB16_10
; CHECK-NEXT: .LBB16_5: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: subs.w r12, r12, #1
; CHECK-NEXT: vstrb.8 q0, [r2], #8
-; CHECK-NEXT: add.w r0, r5, r0, lsl #1
+; CHECK-NEXT: add.w r0, r6, r0, lsl #1
; CHECK-NEXT: add.w r5, r0, #8
; CHECK-NEXT: beq.w .LBB16_12
; CHECK-NEXT: .LBB16_6: @ %while.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB16_8 Depth 2
; CHECK-NEXT: @ Child Loop BB16_11 Depth 2
-; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
; CHECK-NEXT: ldrh.w lr, [r3, #14]
; CHECK-NEXT: vldrw.u32 q0, [r0], #8
-; CHECK-NEXT: ldrh.w r8, [r3, #12]
+; CHECK-NEXT: ldrh.w r10, [r3, #12]
; CHECK-NEXT: ldrh r7, [r3, #10]
; CHECK-NEXT: ldrh r4, [r3, #8]
; CHECK-NEXT: ldrh r6, [r3, #6]
; CHECK-NEXT: ldrh.w r9, [r3, #4]
; CHECK-NEXT: ldrh.w r11, [r3, #2]
-; CHECK-NEXT: ldrh.w r10, [r3]
+; CHECK-NEXT: ldrh.w r8, [r3]
; CHECK-NEXT: vstrb.8 q0, [r1], #8
; CHECK-NEXT: vldrw.u32 q0, [r5]
-; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: str r0, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: adds r0, r5, #2
; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: vmul.f16 q0, q0, r10
+; CHECK-NEXT: vmul.f16 q0, q0, r8
; CHECK-NEXT: adds r0, r5, #6
; CHECK-NEXT: vfma.f16 q0, q1, r11
; CHECK-NEXT: vldrw.u32 q1, [r5, #4]
@@ -1068,73 +1066,73 @@ define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr no
; CHECK-NEXT: add.w r0, r5, #10
; CHECK-NEXT: vfma.f16 q0, q1, r6
; CHECK-NEXT: vldrw.u32 q1, [r5, #8]
+; CHECK-NEXT: add.w r6, r5, #16
; CHECK-NEXT: vfma.f16 q0, q1, r4
; CHECK-NEXT: vldrw.u32 q1, [r0]
; CHECK-NEXT: add.w r0, r5, #14
; CHECK-NEXT: vfma.f16 q0, q1, r7
; CHECK-NEXT: vldrw.u32 q1, [r5, #12]
-; CHECK-NEXT: adds r5, #16
-; CHECK-NEXT: vfma.f16 q0, q1, r8
+; CHECK-NEXT: vfma.f16 q0, q1, r10
; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: vfma.f16 q0, q1, lr
; CHECK-NEXT: cmp r0, #16
; CHECK-NEXT: blo .LBB16_9
; CHECK-NEXT: @ %bb.7: @ %for.body.preheader
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT: add.w r5, r3, #16
; CHECK-NEXT: dls lr, r0
-; CHECK-NEXT: ldr r6, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: .LBB16_8: @ %for.body
; CHECK-NEXT: @ Parent Loop BB16_6 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldrh r0, [r6], #16
-; CHECK-NEXT: vldrw.u32 q1, [r5]
-; CHECK-NEXT: adds r4, r5, #2
+; CHECK-NEXT: ldrh r0, [r5], #16
+; CHECK-NEXT: vldrw.u32 q1, [r6]
+; CHECK-NEXT: adds r4, r6, #2
; CHECK-NEXT: vfma.f16 q0, q1, r0
; CHECK-NEXT: vldrw.u32 q1, [r4]
-; CHECK-NEXT: ldrh r0, [r6, #-14]
-; CHECK-NEXT: adds r4, r5, #6
+; CHECK-NEXT: ldrh r0, [r5, #-14]
+; CHECK-NEXT: adds r4, r6, #6
; CHECK-NEXT: vfma.f16 q0, q1, r0
-; CHECK-NEXT: ldrh r0, [r6, #-12]
-; CHECK-NEXT: vldrw.u32 q1, [r5, #4]
+; CHECK-NEXT: ldrh r0, [r5, #-12]
+; CHECK-NEXT: vldrw.u32 q1, [r6, #4]
; CHECK-NEXT: vfma.f16 q0, q1, r0
; CHECK-NEXT: vldrw.u32 q1, [r4]
-; CHECK-NEXT: ldrh r0, [r6, #-10]
-; CHECK-NEXT: add.w r4, r5, #10
+; CHECK-NEXT: ldrh r0, [r5, #-10]
+; CHECK-NEXT: add.w r4, r6, #10
; CHECK-NEXT: vfma.f16 q0, q1, r0
-; CHECK-NEXT: ldrh r0, [r6, #-8]
-; CHECK-NEXT: vldrw.u32 q1, [r5, #8]
+; CHECK-NEXT: ldrh r0, [r5, #-8]
+; CHECK-NEXT: vldrw.u32 q1, [r6, #8]
; CHECK-NEXT: vfma.f16 q0, q1, r0
; CHECK-NEXT: vldrw.u32 q1, [r4]
-; CHECK-NEXT: ldrh r0, [r6, #-6]
-; CHECK-NEXT: ldrh r4, [r6, #-2]
+; CHECK-NEXT: ldrh r0, [r5, #-6]
+; CHECK-NEXT: ldrh r4, [r5, #-2]
; CHECK-NEXT: vfma.f16 q0, q1, r0
-; CHECK-NEXT: ldrh r0, [r6, #-4]
-; CHECK-NEXT: vldrw.u32 q1, [r5, #12]
+; CHECK-NEXT: ldrh r0, [r5, #-4]
+; CHECK-NEXT: vldrw.u32 q1, [r6, #12]
; CHECK-NEXT: vfma.f16 q0, q1, r0
-; CHECK-NEXT: add.w r0, r5, #14
+; CHECK-NEXT: add.w r0, r6, #14
; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: adds r5, #16
+; CHECK-NEXT: adds r6, #16
; CHECK-NEXT: vfma.f16 q0, q1, r4
; CHECK-NEXT: le lr, .LBB16_8
; CHECK-NEXT: b .LBB16_4
; CHECK-NEXT: .LBB16_9: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r6, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: add.w r5, r3, #16
; CHECK-NEXT: b .LBB16_4
; CHECK-NEXT: .LBB16_10: @ %while.body76.preheader
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: .LBB16_11: @ %while.body76
; CHECK-NEXT: @ Parent Loop BB16_6 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldrh r4, [r6], #2
+; CHECK-NEXT: ldrh r4, [r5], #2
; CHECK-NEXT: vldrh.u16 q1, [r0], #2
; CHECK-NEXT: vfma.f16 q0, q1, r4
; CHECK-NEXT: le lr, .LBB16_11
; CHECK-NEXT: b .LBB16_3
; CHECK-NEXT: .LBB16_12: @ %if.end
-; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: add sp, #20
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
entry:
%pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 1
diff --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index 28166e4..f7b4548 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -995,46 +995,44 @@ define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr no
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: .pad #24
+; CHECK-NEXT: sub sp, #24
; CHECK-NEXT: ldrh r6, [r0]
-; CHECK-NEXT: movs r5, #1
-; CHECK-NEXT: ldrd r4, r10, [r0, #4]
+; CHECK-NEXT: movs r4, #1
+; CHECK-NEXT: ldrd r7, r10, [r0, #4]
; CHECK-NEXT: sub.w r0, r6, #8
; CHECK-NEXT: add.w r3, r0, r0, lsr #29
; CHECK-NEXT: and r0, r0, #7
-; CHECK-NEXT: asrs r7, r3, #3
-; CHECK-NEXT: cmp r7, #1
+; CHECK-NEXT: asrs r5, r3, #3
+; CHECK-NEXT: cmp r5, #1
; CHECK-NEXT: it gt
-; CHECK-NEXT: asrgt r5, r3, #3
-; CHECK-NEXT: add.w r3, r4, r6, lsl #2
+; CHECK-NEXT: asrgt r4, r3, #3
+; CHECK-NEXT: add.w r3, r7, r6, lsl #2
; CHECK-NEXT: sub.w r9, r3, #4
; CHECK-NEXT: rsbs r3, r6, #0
-; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill
-; CHECK-NEXT: add.w r3, r10, #32
-; CHECK-NEXT: str r5, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: str r6, [sp, #16] @ 4-byte Spill
-; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: str r4, [sp] @ 4-byte Spill
+; CHECK-NEXT: str r6, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: str r0, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: b .LBB16_6
; CHECK-NEXT: .LBB16_3: @ %while.end.loopexit
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
-; CHECK-NEXT: add.w r4, r4, r0, lsl #2
+; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: add.w r7, r7, r0, lsl #2
; CHECK-NEXT: b .LBB16_5
; CHECK-NEXT: .LBB16_4: @ %for.end
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
-; CHECK-NEXT: ldrd r0, r9, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: ldrd r0, r9, [sp, #12] @ 8-byte Folded Reload
; CHECK-NEXT: wls lr, r0, .LBB16_5
; CHECK-NEXT: b .LBB16_10
; CHECK-NEXT: .LBB16_5: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: subs.w r12, r12, #1
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: add.w r0, r4, r0, lsl #2
-; CHECK-NEXT: add.w r4, r0, #16
+; CHECK-NEXT: add.w r0, r7, r0, lsl #2
+; CHECK-NEXT: add.w r7, r0, #16
; CHECK-NEXT: beq .LBB16_12
; CHECK-NEXT: .LBB16_6: @ %while.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
@@ -1042,76 +1040,76 @@ define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr no
; CHECK-NEXT: @ Child Loop BB16_11 Depth 2
; CHECK-NEXT: add.w lr, r10, #8
; CHECK-NEXT: vldrw.u32 q0, [r1], #16
-; CHECK-NEXT: ldrd r3, r7, [r10]
+; CHECK-NEXT: ldrd r3, r4, [r10]
; CHECK-NEXT: ldm.w lr, {r0, r5, r6, lr}
; CHECK-NEXT: ldrd r11, r8, [r10, #24]
; CHECK-NEXT: vstrb.8 q0, [r9], #16
-; CHECK-NEXT: vldrw.u32 q0, [r4], #32
-; CHECK-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; CHECK-NEXT: str.w r9, [sp, #24] @ 4-byte Spill
-; CHECK-NEXT: vldrw.u32 q1, [r4, #-28]
+; CHECK-NEXT: vldrw.u32 q0, [r7], #32
+; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: str.w r9, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: vldrw.u32 q1, [r7, #-28]
; CHECK-NEXT: vmul.f32 q0, q0, r3
-; CHECK-NEXT: vldrw.u32 q6, [r4, #-24]
-; CHECK-NEXT: vldrw.u32 q4, [r4, #-20]
-; CHECK-NEXT: vfma.f32 q0, q1, r7
-; CHECK-NEXT: vldrw.u32 q5, [r4, #-16]
+; CHECK-NEXT: vldrw.u32 q6, [r7, #-24]
+; CHECK-NEXT: vldrw.u32 q4, [r7, #-20]
+; CHECK-NEXT: vfma.f32 q0, q1, r4
+; CHECK-NEXT: vldrw.u32 q5, [r7, #-16]
; CHECK-NEXT: vfma.f32 q0, q6, r0
-; CHECK-NEXT: vldrw.u32 q2, [r4, #-12]
+; CHECK-NEXT: vldrw.u32 q2, [r7, #-12]
; CHECK-NEXT: vfma.f32 q0, q4, r5
-; CHECK-NEXT: vldrw.u32 q3, [r4, #-8]
+; CHECK-NEXT: vldrw.u32 q3, [r7, #-8]
; CHECK-NEXT: vfma.f32 q0, q5, r6
-; CHECK-NEXT: vldrw.u32 q1, [r4, #-4]
+; CHECK-NEXT: vldrw.u32 q1, [r7, #-4]
; CHECK-NEXT: vfma.f32 q0, q2, lr
-; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: vfma.f32 q0, q3, r11
; CHECK-NEXT: vfma.f32 q0, q1, r8
; CHECK-NEXT: cmp r0, #16
; CHECK-NEXT: blo .LBB16_9
; CHECK-NEXT: @ %bb.7: @ %for.body.preheader
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT: add.w r4, r10, #32
; CHECK-NEXT: dls lr, r0
-; CHECK-NEXT: ldr r7, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: .LBB16_8: @ %for.body
; CHECK-NEXT: @ Parent Loop BB16_6 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldm.w r7, {r0, r3, r5, r6, r8, r11}
-; CHECK-NEXT: vldrw.u32 q1, [r4], #32
-; CHECK-NEXT: vldrw.u32 q6, [r4, #-24]
-; CHECK-NEXT: vldrw.u32 q4, [r4, #-20]
+; CHECK-NEXT: ldm.w r4, {r0, r3, r5, r6, r8, r11}
+; CHECK-NEXT: vldrw.u32 q1, [r7], #32
+; CHECK-NEXT: vldrw.u32 q6, [r7, #-24]
+; CHECK-NEXT: vldrw.u32 q4, [r7, #-20]
; CHECK-NEXT: vfma.f32 q0, q1, r0
-; CHECK-NEXT: vldrw.u32 q1, [r4, #-28]
-; CHECK-NEXT: vldrw.u32 q5, [r4, #-16]
-; CHECK-NEXT: vldrw.u32 q2, [r4, #-12]
+; CHECK-NEXT: vldrw.u32 q1, [r7, #-28]
+; CHECK-NEXT: vldrw.u32 q5, [r7, #-16]
+; CHECK-NEXT: vldrw.u32 q2, [r7, #-12]
; CHECK-NEXT: vfma.f32 q0, q1, r3
-; CHECK-NEXT: ldrd r9, r1, [r7, #24]
+; CHECK-NEXT: ldrd r9, r1, [r4, #24]
; CHECK-NEXT: vfma.f32 q0, q6, r5
-; CHECK-NEXT: vldrw.u32 q3, [r4, #-8]
+; CHECK-NEXT: vldrw.u32 q3, [r7, #-8]
; CHECK-NEXT: vfma.f32 q0, q4, r6
-; CHECK-NEXT: vldrw.u32 q1, [r4, #-4]
+; CHECK-NEXT: vldrw.u32 q1, [r7, #-4]
; CHECK-NEXT: vfma.f32 q0, q5, r8
-; CHECK-NEXT: adds r7, #32
+; CHECK-NEXT: adds r4, #32
; CHECK-NEXT: vfma.f32 q0, q2, r11
; CHECK-NEXT: vfma.f32 q0, q3, r9
; CHECK-NEXT: vfma.f32 q0, q1, r1
; CHECK-NEXT: le lr, .LBB16_8
; CHECK-NEXT: b .LBB16_4
; CHECK-NEXT: .LBB16_9: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: ldr r7, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: add.w r4, r10, #32
; CHECK-NEXT: b .LBB16_4
; CHECK-NEXT: .LBB16_10: @ %while.body76.preheader
; CHECK-NEXT: @ in Loop: Header=BB16_6 Depth=1
-; CHECK-NEXT: mov r3, r4
+; CHECK-NEXT: mov r3, r7
; CHECK-NEXT: .LBB16_11: @ %while.body76
; CHECK-NEXT: @ Parent Loop BB16_6 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldr r0, [r7], #4
+; CHECK-NEXT: ldr r0, [r4], #4
; CHECK-NEXT: vldrw.u32 q1, [r3], #4
; CHECK-NEXT: vfma.f32 q0, q1, r0
; CHECK-NEXT: le lr, .LBB16_11
; CHECK-NEXT: b .LBB16_3
; CHECK-NEXT: .LBB16_12:
-; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: add sp, #24
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index e8b49c1..0d86f22 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -711,8 +711,8 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #136
-; CHECK-NEXT: sub sp, #136
+; CHECK-NEXT: .pad #120
+; CHECK-NEXT: sub sp, #120
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: strd r1, r2, [sp, #64] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB14_5
@@ -725,22 +725,20 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: subs r1, #8
; CHECK-NEXT: vstrw.32 q0, [sp, #40] @ 16-byte Spill
-; CHECK-NEXT: vmov.i16 q2, #0x18
; CHECK-NEXT: add.w r1, r2, r1, lsr #3
; CHECK-NEXT: str r1, [sp, #60] @ 4-byte Spill
; CHECK-NEXT: adr r1, .LCPI14_0
; CHECK-NEXT: adr r2, .LCPI14_1
; CHECK-NEXT: vldrw.u32 q0, [r1]
-; CHECK-NEXT: vstrw.32 q2, [sp, #72] @ 16-byte Spill
; CHECK-NEXT: vstrw.32 q0, [sp, #24] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: add r2, sp, #120
+; CHECK-NEXT: add r2, sp, #104
; CHECK-NEXT: vstrw.32 q0, [sp, #8] @ 16-byte Spill
; CHECK-NEXT: .LBB14_2: @ %vector.ph
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB14_3 Depth 2
; CHECK-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
-; CHECK-NEXT: add.w r10, sp, #104
+; CHECK-NEXT: add.w r10, sp, #88
; CHECK-NEXT: dls lr, r1
; CHECK-NEXT: ldr r7, [sp, #64] @ 4-byte Reload
; CHECK-NEXT: vldrw.u32 q4, [sp, #24] @ 16-byte Reload
@@ -762,7 +760,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: vmov r6, r2, d4
; CHECK-NEXT: ldrh r1, [r1]
; CHECK-NEXT: ldrh.w r12, [r4]
-; CHECK-NEXT: add r4, sp, #88
+; CHECK-NEXT: add r4, sp, #72
; CHECK-NEXT: ldrh.w r11, [r5]
; CHECK-NEXT: ldrh r3, [r3]
; CHECK-NEXT: ldrh r5, [r6]
@@ -807,7 +805,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: vmov.16 q3[0], r2
; CHECK-NEXT: vmov.16 q3[1], r5
; CHECK-NEXT: vmov r2, r5, d5
-; CHECK-NEXT: vldrw.u32 q2, [sp, #72] @ 16-byte Reload
+; CHECK-NEXT: vmov.i16 q2, #0x18
; CHECK-NEXT: vadd.i16 q6, q6, q2
; CHECK-NEXT: vadd.i16 q5, q5, q2
; CHECK-NEXT: vadd.i16 q4, q4, q2
@@ -849,7 +847,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: cmp r1, r3
; CHECK-NEXT: bne.w .LBB14_2
; CHECK-NEXT: .LBB14_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #136
+; CHECK-NEXT: add sp, #120
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -950,7 +948,6 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture read
; CHECK-NEXT: vstrw.32 q0, [sp, #80] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q0, [r6]
; CHECK-NEXT: adr r6, .LCPI15_9
-; CHECK-NEXT: vmov.i32 q2, #0x30
; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q0, [r7]
; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
@@ -963,212 +960,213 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture read
; CHECK-NEXT: .LBB15_2: @ %vector.ph
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB15_3 Depth 2
+; CHECK-NEXT: vldrw.u32 q2, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: adr r1, .LCPI15_3
-; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vldrw.u32 q0, [r1]
-; CHECK-NEXT: adr r1, .LCPI15_4
; CHECK-NEXT: vldrw.u32 q5, [r1]
+; CHECK-NEXT: adr r1, .LCPI15_4
+; CHECK-NEXT: vstrw.32 q2, [sp, #296] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q2, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [r1]
; CHECK-NEXT: adr r1, .LCPI15_2
-; CHECK-NEXT: vldrw.u32 q3, [r1]
+; CHECK-NEXT: vstrw.32 q2, [sp, #280] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q2, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q1, [r1]
; CHECK-NEXT: adr r1, .LCPI15_10
-; CHECK-NEXT: vstrw.32 q6, [sp, #280] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q6, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q2, [sp, #264] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q2, [sp, #80] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q3, [r1]
; CHECK-NEXT: adr r1, .LCPI15_11
; CHECK-NEXT: ldr.w r8, [sp, #116] @ 4-byte Reload
-; CHECK-NEXT: vstrw.32 q3, [sp, #248] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT: vstrw.32 q6, [sp, #264] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q6, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q2, [sp, #248] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q0, [sp, #96] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q7, [r1]
-; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload
-; CHECK-NEXT: vstrw.32 q3, [sp, #200] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
; CHECK-NEXT: mov r11, r10
-; CHECK-NEXT: vstrw.32 q6, [sp, #232] @ 16-byte Spill
-; CHECK-NEXT: vstrw.32 q3, [sp, #184] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q2, [sp, #232] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q0, [sp, #216] @ 16-byte Spill
; CHECK-NEXT: .LBB15_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB15_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: vadd.i32 q4, q1, r0
-; CHECK-NEXT: vstrw.32 q7, [sp, #136] @ 16-byte Spill
-; CHECK-NEXT: vmov r1, lr, d8
-; CHECK-NEXT: vadd.i32 q7, q7, r0
-; CHECK-NEXT: vmov r5, r4, d15
-; CHECK-NEXT: vadd.i32 q6, q0, r0
-; CHECK-NEXT: vmov r6, r7, d13
+; CHECK-NEXT: vmov q0, q7
+; CHECK-NEXT: vstrw.32 q7, [sp, #184] @ 16-byte Spill
+; CHECK-NEXT: vadd.i32 q7, q5, r0
+; CHECK-NEXT: vstrw.32 q5, [sp, #200] @ 16-byte Spill
+; CHECK-NEXT: vadd.i32 q5, q0, r0
+; CHECK-NEXT: vmov q0, q6
+; CHECK-NEXT: vadd.i32 q6, q4, r0
+; CHECK-NEXT: vmov r5, r4, d11
+; CHECK-NEXT: vmov r1, lr, d12
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vmov r6, r7, d15
; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
-; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload
-; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q1, q1, r0
-; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill
-; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov q1, q3
+; CHECK-NEXT: vstrw.32 q4, [sp, #168] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q4, [sp, #248] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q0, [sp, #120] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q3, [sp, #136] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q3, [sp, #184] @ 16-byte Reload
; CHECK-NEXT: subs.w r11, r11, #16
-; CHECK-NEXT: ldrb.w r9, [r1]
-; CHECK-NEXT: vmov r1, r3, d14
; CHECK-NEXT: ldrb r5, [r5]
+; CHECK-NEXT: ldrb.w r9, [r1]
+; CHECK-NEXT: vmov r1, r3, d10
; CHECK-NEXT: ldrb r7, [r7]
; CHECK-NEXT: ldrb r1, [r1]
-; CHECK-NEXT: vmov.8 q7[0], r1
+; CHECK-NEXT: vmov.8 q5[0], r1
; CHECK-NEXT: ldrb r1, [r3]
-; CHECK-NEXT: vmov.8 q7[1], r1
-; CHECK-NEXT: vmov r1, r3, d12
-; CHECK-NEXT: vmov.8 q7[2], r5
+; CHECK-NEXT: vmov.8 q5[1], r1
+; CHECK-NEXT: vmov r1, r3, d14
+; CHECK-NEXT: vmov.8 q5[2], r5
; CHECK-NEXT: ldrb r5, [r6]
; CHECK-NEXT: ldrb r6, [r4]
-; CHECK-NEXT: vmov.8 q7[3], r6
+; CHECK-NEXT: vmov.8 q5[3], r6
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: ldrb r3, [r3]
-; CHECK-NEXT: vmov.8 q6[0], r1
-; CHECK-NEXT: vmov r6, r1, d2
-; CHECK-NEXT: vmov.8 q6[1], r3
-; CHECK-NEXT: vmov.8 q6[2], r5
-; CHECK-NEXT: vmov.8 q6[3], r7
+; CHECK-NEXT: vmov.8 q7[0], r1
+; CHECK-NEXT: vmov r6, r1, d4
+; CHECK-NEXT: vmov.8 q7[1], r3
+; CHECK-NEXT: vmov.8 q7[2], r5
+; CHECK-NEXT: vmov.8 q7[3], r7
; CHECK-NEXT: ldrb.w r7, [lr]
-; CHECK-NEXT: vmov.8 q6[4], r9
-; CHECK-NEXT: vmov.8 q6[5], r7
+; CHECK-NEXT: vmov.8 q7[4], r9
+; CHECK-NEXT: vmov.8 q7[5], r7
; CHECK-NEXT: ldrb r4, [r1]
-; CHECK-NEXT: vmov r1, r5, d3
-; CHECK-NEXT: vldrw.u32 q1, [sp, #232] @ 16-byte Reload
+; CHECK-NEXT: vmov r1, r5, d5
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vldrw.u32 q1, [sp, #280] @ 16-byte Reload
; CHECK-NEXT: ldrb.w r12, [r1]
-; CHECK-NEXT: vmov r1, r3, d9
+; CHECK-NEXT: vmov r1, r3, d13
; CHECK-NEXT: ldrb r5, [r5]
-; CHECK-NEXT: vldrw.u32 q4, [sp, #184] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [sp, #232] @ 16-byte Reload
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: ldrb r3, [r3]
-; CHECK-NEXT: vmov.8 q6[6], r1
-; CHECK-NEXT: vmov r1, r7, d0
-; CHECK-NEXT: vmov.8 q6[7], r3
+; CHECK-NEXT: vmov.8 q7[6], r1
+; CHECK-NEXT: vmov r1, r7, d4
+; CHECK-NEXT: vmov.8 q7[7], r3
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: ldrb r7, [r7]
-; CHECK-NEXT: vmov.8 q7[4], r1
-; CHECK-NEXT: vmov r1, r3, d1
-; CHECK-NEXT: vldrw.u32 q0, [sp, #264] @ 16-byte Reload
-; CHECK-NEXT: vmov.8 q7[5], r7
-; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vmov.8 q5[4], r1
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: vmov.8 q5[5], r7
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: ldrb r3, [r3]
-; CHECK-NEXT: vmov.8 q7[6], r1
+; CHECK-NEXT: vmov.8 q5[6], r1
; CHECK-NEXT: ldrb r1, [r6]
-; CHECK-NEXT: vmov r7, r6, d0
-; CHECK-NEXT: vmov.8 q7[7], r3
-; CHECK-NEXT: vmov r3, lr, d1
-; CHECK-NEXT: vldrw.u32 q0, [sp, #280] @ 16-byte Reload
-; CHECK-NEXT: vmov.8 q7[8], r1
-; CHECK-NEXT: vadd.i32 q0, q0, r0
-; CHECK-NEXT: vmov.8 q7[9], r4
-; CHECK-NEXT: vmov r4, r1, d0
-; CHECK-NEXT: vmov.8 q7[10], r12
-; CHECK-NEXT: vmov.8 q7[11], r5
+; CHECK-NEXT: vmov.8 q5[7], r3
+; CHECK-NEXT: vmov r7, r6, d4
+; CHECK-NEXT: vmov r3, lr, d5
+; CHECK-NEXT: vmov.8 q5[8], r1
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vmov.8 q5[9], r4
+; CHECK-NEXT: vmov r4, r1, d4
+; CHECK-NEXT: vmov.8 q5[10], r12
+; CHECK-NEXT: vmov.8 q5[11], r5
+; CHECK-NEXT: vldrw.u32 q1, [sp, #264] @ 16-byte Reload
; CHECK-NEXT: ldrb r7, [r7]
; CHECK-NEXT: ldrb r6, [r6]
; CHECK-NEXT: ldrb r3, [r3]
; CHECK-NEXT: ldrb r4, [r4]
; CHECK-NEXT: ldrb r1, [r1]
-; CHECK-NEXT: vmov.8 q6[8], r4
-; CHECK-NEXT: vmov r5, r4, d1
-; CHECK-NEXT: vmov.8 q6[9], r1
-; CHECK-NEXT: vadd.i32 q0, q5, r0
-; CHECK-NEXT: vldrw.u32 q5, [sp, #200] @ 16-byte Reload
+; CHECK-NEXT: vmov.8 q7[8], r4
+; CHECK-NEXT: vmov r5, r4, d5
+; CHECK-NEXT: vmov.8 q7[9], r1
+; CHECK-NEXT: vadd.i32 q2, q0, r0
+; CHECK-NEXT: vldrw.u32 q0, [sp, #216] @ 16-byte Reload
; CHECK-NEXT: ldrb r5, [r5]
; CHECK-NEXT: ldrb r4, [r4]
-; CHECK-NEXT: vmov.8 q6[10], r5
-; CHECK-NEXT: vmov.8 q6[11], r4
-; CHECK-NEXT: vmov.8 q6[12], r7
-; CHECK-NEXT: vmov.8 q6[13], r6
-; CHECK-NEXT: vmov.8 q6[14], r3
-; CHECK-NEXT: vmov r1, r3, d0
+; CHECK-NEXT: vmov.8 q7[10], r5
+; CHECK-NEXT: vmov.8 q7[11], r4
+; CHECK-NEXT: vmov.8 q7[12], r7
+; CHECK-NEXT: vmov.8 q7[13], r6
+; CHECK-NEXT: vmov.8 q7[14], r3
+; CHECK-NEXT: vmov r1, r3, d4
; CHECK-NEXT: ldrb r1, [r1]
-; CHECK-NEXT: vmov.8 q7[12], r1
+; CHECK-NEXT: vmov.8 q5[12], r1
; CHECK-NEXT: ldrb r1, [r3]
-; CHECK-NEXT: vmov.8 q7[13], r1
-; CHECK-NEXT: vmov r1, r3, d1
-; CHECK-NEXT: vadd.i32 q0, q1, r0
-; CHECK-NEXT: vadd.i32 q1, q1, q2
-; CHECK-NEXT: vstrw.32 q1, [sp, #232] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q1, [sp, #248] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q1, q1, q2
-; CHECK-NEXT: vstrw.32 q1, [sp, #248] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q1, [sp, #152] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q1, q1, q2
+; CHECK-NEXT: vmov.8 q5[13], r1
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: vadd.i32 q2, q1, r0
; CHECK-NEXT: ldrb r1, [r1]
-; CHECK-NEXT: vmov.8 q7[14], r1
+; CHECK-NEXT: vmov.8 q5[14], r1
; CHECK-NEXT: ldrb r1, [r3]
-; CHECK-NEXT: vmov.8 q7[15], r1
+; CHECK-NEXT: vmov.8 q5[15], r1
; CHECK-NEXT: ldrb.w r1, [lr]
-; CHECK-NEXT: vmov.8 q6[15], r1
-; CHECK-NEXT: vmov r1, r3, d0
-; CHECK-NEXT: vadd.i8 q6, q6, q7
+; CHECK-NEXT: vmov.8 q7[15], r1
+; CHECK-NEXT: vmov r1, r3, d4
+; CHECK-NEXT: vadd.i8 q5, q7, q5
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: ldrb r3, [r3]
; CHECK-NEXT: vmov.8 q7[0], r1
; CHECK-NEXT: vmov.8 q7[1], r3
-; CHECK-NEXT: vmov r1, r3, d1
-; CHECK-NEXT: vadd.i32 q0, q3, r0
-; CHECK-NEXT: vadd.i32 q3, q3, q2
-; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #296] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q3, q3, q2
-; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #280] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q3, q3, q2
-; CHECK-NEXT: vstrw.32 q3, [sp, #280] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q3, [sp, #264] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q3, q3, q2
-; CHECK-NEXT: vstrw.32 q3, [sp, #264] @ 16-byte Spill
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: vadd.i32 q2, q4, r0
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[2], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[3], r1
-; CHECK-NEXT: vmov r1, r3, d0
+; CHECK-NEXT: vmov r1, r3, d4
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[4], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[5], r1
-; CHECK-NEXT: vmov r1, r3, d1
-; CHECK-NEXT: vadd.i32 q0, q5, r0
-; CHECK-NEXT: vadd.i32 q5, q5, q2
-; CHECK-NEXT: vstrw.32 q5, [sp, #200] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q5, [sp, #120] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q5, q5, q2
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: vadd.i32 q2, q6, r0
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[6], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[7], r1
-; CHECK-NEXT: vmov r1, r3, d0
+; CHECK-NEXT: vmov r1, r3, d4
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[8], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[9], r1
-; CHECK-NEXT: vmov r1, r3, d1
-; CHECK-NEXT: vadd.i32 q0, q4, r0
-; CHECK-NEXT: vadd.i32 q4, q4, q2
-; CHECK-NEXT: vstrw.32 q4, [sp, #184] @ 16-byte Spill
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: vadd.i32 q2, q0, r0
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[10], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[11], r1
-; CHECK-NEXT: vmov r1, r3, d0
+; CHECK-NEXT: vmov r1, r3, d4
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[12], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[13], r1
-; CHECK-NEXT: vmov r1, r3, d1
+; CHECK-NEXT: vmov r1, r3, d5
; CHECK-NEXT: ldrb r1, [r1]
; CHECK-NEXT: vmov.8 q7[14], r1
; CHECK-NEXT: ldrb r1, [r3]
; CHECK-NEXT: vmov.8 q7[15], r1
-; CHECK-NEXT: vadd.i8 q0, q6, q7
-; CHECK-NEXT: vldrw.u32 q7, [sp, #136] @ 16-byte Reload
-; CHECK-NEXT: vstrb.8 q0, [r8], #16
-; CHECK-NEXT: vldrw.u32 q0, [sp, #168] @ 16-byte Reload
-; CHECK-NEXT: vadd.i32 q7, q7, q2
+; CHECK-NEXT: vadd.i8 q2, q5, q7
+; CHECK-NEXT: vldrw.u32 q5, [sp, #200] @ 16-byte Reload
+; CHECK-NEXT: vstrb.8 q2, [r8], #16
+; CHECK-NEXT: vmov.i32 q2, #0x30
+; CHECK-NEXT: vadd.i32 q6, q6, q2
+; CHECK-NEXT: vadd.i32 q3, q3, q2
+; CHECK-NEXT: vstrw.32 q6, [sp, #232] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q6, [sp, #296] @ 16-byte Reload
+; CHECK-NEXT: vadd.i32 q1, q1, q2
+; CHECK-NEXT: vadd.i32 q4, q4, q2
+; CHECK-NEXT: vadd.i32 q6, q6, q2
; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vmov q7, q3
+; CHECK-NEXT: vldrw.u32 q3, [sp, #136] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q1, [sp, #264] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q1, [sp, #152] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q4, [sp, #248] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q4, [sp, #168] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q6, [sp, #296] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q6, [sp, #120] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q0, [sp, #216] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q0, [sp, #280] @ 16-byte Reload
+; CHECK-NEXT: vadd.i32 q5, q5, q2
+; CHECK-NEXT: vadd.i32 q3, q3, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q4, q4, q2
+; CHECK-NEXT: vadd.i32 q1, q1, q2
+; CHECK-NEXT: vadd.i32 q6, q6, q2
+; CHECK-NEXT: vstrw.32 q0, [sp, #280] @ 16-byte Spill
; CHECK-NEXT: bne.w .LBB15_3
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB15_2 Depth=1
@@ -1501,14 +1499,14 @@ define void @shlor(ptr nocapture %x, ptr noalias nocapture readonly %y, i32 %n)
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: blt .LBB18_3
; CHECK-NEXT: @ %bb.1: @ %vector.ph
-; CHECK-NEXT: adr.w lr, .LCPI18_0
+; CHECK-NEXT: adr r3, .LCPI18_0
; CHECK-NEXT: adr r4, .LCPI18_1
; CHECK-NEXT: adr r5, .LCPI18_2
; CHECK-NEXT: adr r6, .LCPI18_3
; CHECK-NEXT: vldrw.u32 q0, [r6]
; CHECK-NEXT: vldrw.u32 q1, [r5]
; CHECK-NEXT: vldrw.u32 q2, [r4]
-; CHECK-NEXT: vldrw.u32 q3, [lr]
+; CHECK-NEXT: vldrw.u32 q3, [r3]
; CHECK-NEXT: vadd.i32 q0, q0, r1
; CHECK-NEXT: vadd.i32 q1, q1, r1
; CHECK-NEXT: vadd.i32 q2, q2, r1
diff --git a/llvm/test/CodeGen/Thumb2/mve-phireg.ll b/llvm/test/CodeGen/Thumb2/mve-phireg.ll
index dad856c..00a998c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-phireg.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-phireg.ll
@@ -38,7 +38,7 @@ define arm_aapcs_vfpcc void @k() {
; CHECK-NEXT: vmov.i32 q5, #0x0
; CHECK-NEXT: vpsel q6, q4, q3
; CHECK-NEXT: vstrh.16 q6, [r0]
-; CHECK-NEXT: vmov q6, q5
+; CHECK-NEXT: vmov.i32 q6, #0x0
; CHECK-NEXT: cbz r1, .LBB0_2
; CHECK-NEXT: le .LBB0_1
; CHECK-NEXT: .LBB0_2: @ %for.cond4.preheader
@@ -135,12 +135,12 @@ vector.body115: ; preds = %vector.body115, %ve
define dso_local i32 @e() #0 {
; CHECK-LABEL: e:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
-; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #408
-; CHECK-NEXT: sub sp, #408
+; CHECK-NEXT: .pad #392
+; CHECK-NEXT: sub sp, #392
; CHECK-NEXT: movw r7, :lower16:.L_MergedGlobals
; CHECK-NEXT: vldr s15, .LCPI1_1
; CHECK-NEXT: movt r7, :upper16:.L_MergedGlobals
@@ -148,18 +148,16 @@ define dso_local i32 @e() #0 {
; CHECK-NEXT: mov r4, r7
; CHECK-NEXT: mov r3, r7
; CHECK-NEXT: ldr r6, [r4, #8]!
-; CHECK-NEXT: vmov.i32 q0, #0x0
-; CHECK-NEXT: ldr r0, [r3, #4]!
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT: movt r2, :upper16:e
+; CHECK-NEXT: ldr r0, [r3, #4]!
; CHECK-NEXT: vmov r5, s15
; CHECK-NEXT: vmov q0[2], q0[0], r4, r4
-; CHECK-NEXT: vmov s13, r3
; CHECK-NEXT: vldr s12, .LCPI1_0
+; CHECK-NEXT: vmov s13, r3
; CHECK-NEXT: vmov q0[3], q0[1], r5, r2
; CHECK-NEXT: vdup.32 q7, r3
; CHECK-NEXT: vmov q6[2], q6[0], r3, r5
-; CHECK-NEXT: vstrw.32 q0, [sp, #92]
+; CHECK-NEXT: vstrw.32 q0, [sp, #76]
; CHECK-NEXT: vmov q0, q7
; CHECK-NEXT: vmov q6[3], q6[1], r3, r2
; CHECK-NEXT: vmov q4, q7
@@ -168,7 +166,7 @@ define dso_local i32 @e() #0 {
; CHECK-NEXT: vmov s21, r2
; CHECK-NEXT: movs r1, #64
; CHECK-NEXT: vmov.f32 s20, s12
-; CHECK-NEXT: str r0, [sp, #40]
+; CHECK-NEXT: str r0, [sp, #24]
; CHECK-NEXT: vmov.f32 s22, s13
; CHECK-NEXT: str r6, [r0]
; CHECK-NEXT: vmov.f32 s23, s15
@@ -186,12 +184,12 @@ define dso_local i32 @e() #0 {
; CHECK-NEXT: vmov q2[3], q2[1], r4, r5
; CHECK-NEXT: vmov.32 q4[0], r8
; CHECK-NEXT: @ implicit-def: $r2
-; CHECK-NEXT: str.w r8, [sp, #44]
-; CHECK-NEXT: vstrw.32 q3, [sp, #60]
-; CHECK-NEXT: strh.w r12, [sp, #406]
+; CHECK-NEXT: str.w r8, [sp, #28]
+; CHECK-NEXT: vstrw.32 q3, [sp, #44]
+; CHECK-NEXT: strh.w r12, [sp, #390]
; CHECK-NEXT: wlstp.8 lr, r1, .LBB1_2
; CHECK-NEXT: .LBB1_1: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: letp lr, .LBB1_1
; CHECK-NEXT: .LBB1_2: @ %entry
@@ -199,7 +197,7 @@ define dso_local i32 @e() #0 {
; CHECK-NEXT: str.w r8, [r7]
; CHECK-NEXT: vstrw.32 q4, [r0]
; CHECK-NEXT: vstrw.32 q2, [r0]
-; CHECK-NEXT: str.w r12, [sp, #324]
+; CHECK-NEXT: str.w r12, [sp, #308]
; CHECK-NEXT: .LBB1_3: @ %for.cond
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: b .LBB1_3
diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
index f90af3c..2587a0bb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
@@ -115,17 +115,17 @@ define void @DCT_mve2(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: movs r4, #1
; CHECK-NEXT: ldr r3, [r0]
; CHECK-NEXT: add.w r11, r3, r12, lsl #2
-; CHECK-NEXT: add.w r7, r3, r12, lsl #3
-; CHECK-NEXT: lsl.w r9, r12, #3
+; CHECK-NEXT: add.w r6, r3, r12, lsl #3
+; CHECK-NEXT: lsl.w r10, r12, #3
; CHECK-NEXT: .LBB1_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB1_3 Depth 2
; CHECK-NEXT: ldr r5, [sp] @ 4-byte Reload
+; CHECK-NEXT: add.w r9, r4, #1
; CHECK-NEXT: vmov.i32 q0, #0x0
-; CHECK-NEXT: add.w r10, r4, #1
; CHECK-NEXT: mov r3, r11
-; CHECK-NEXT: mov r0, r7
-; CHECK-NEXT: vmov q1, q0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: vmov.i32 q1, #0x0
; CHECK-NEXT: dlstp.32 lr, r12
; CHECK-NEXT: .LBB1_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB1_2 Depth=1
@@ -139,11 +139,11 @@ define void @DCT_mve2(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB1_2 Depth=1
; CHECK-NEXT: vadd.f32 s2, s2, s3
-; CHECK-NEXT: add.w r0, r2, r10, lsl #2
+; CHECK-NEXT: add.w r0, r2, r9, lsl #2
; CHECK-NEXT: vadd.f32 s0, s0, s1
-; CHECK-NEXT: add r11, r9
+; CHECK-NEXT: add r11, r10
; CHECK-NEXT: vadd.f32 s6, s6, s7
-; CHECK-NEXT: add r7, r9
+; CHECK-NEXT: add r6, r10
; CHECK-NEXT: vadd.f32 s4, s4, s5
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vadd.f32 s2, s4, s6
@@ -228,46 +228,40 @@ define void @DCT_mve3(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #24
-; CHECK-NEXT: sub sp, #24
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: subs r1, #3
-; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
; CHECK-NEXT: blo .LBB2_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
-; CHECK-NEXT: ldr r3, [r0, #8]
+; CHECK-NEXT: ldr.w r9, [r0, #8]
; CHECK-NEXT: movs r5, #1
; CHECK-NEXT: ldr r1, [r0]
-; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: add.w r0, r3, r3, lsl #1
-; CHECK-NEXT: add.w r9, r1, r3, lsl #2
-; CHECK-NEXT: add.w r12, r1, r3, lsl #3
-; CHECK-NEXT: adds r3, #3
+; CHECK-NEXT: add.w r3, r9, #3
; CHECK-NEXT: bic r3, r3, #3
-; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: add.w r10, r1, r0, lsl #2
+; CHECK-NEXT: add.w r0, r9, r9, lsl #1
; CHECK-NEXT: subs r3, #4
+; CHECK-NEXT: add.w r10, r1, r9, lsl #2
+; CHECK-NEXT: add.w r12, r1, r9, lsl #3
+; CHECK-NEXT: add.w r1, r1, r0, lsl #2
+; CHECK-NEXT: add.w r3, r5, r3, lsr #2
+; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: lsl.w r11, r0, #2
-; CHECK-NEXT: add.w r1, r5, r3, lsr #2
-; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB2_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB2_3 Depth 2
-; CHECK-NEXT: ldr r6, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r6, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: vmov.i32 q0, #0x0
-; CHECK-NEXT: adds r0, r5, #2
-; CHECK-NEXT: adds r2, r5, #1
-; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
-; CHECK-NEXT: mov r3, r9
-; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: mov r3, r10
; CHECK-NEXT: mov r0, r12
-; CHECK-NEXT: mov r4, r10
-; CHECK-NEXT: vmov q2, q0
-; CHECK-NEXT: vmov q1, q0
-; CHECK-NEXT: dlstp.32 lr, r7
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: dlstp.32 lr, r9
; CHECK-NEXT: .LBB2_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB2_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
@@ -282,31 +276,31 @@ define void @DCT_mve3(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: vadd.f32 s10, s10, s11
-; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: adds r0, r5, #1
; CHECK-NEXT: vadd.f32 s8, s8, s9
-; CHECK-NEXT: add r9, r11
+; CHECK-NEXT: add r10, r11
; CHECK-NEXT: vadd.f32 s6, s6, s7
-; CHECK-NEXT: add.w r0, r1, r2, lsl #2
+; CHECK-NEXT: add.w r0, r2, r0, lsl #2
; CHECK-NEXT: vadd.f32 s4, s4, s5
; CHECK-NEXT: add r12, r11
; CHECK-NEXT: vadd.f32 s2, s2, s3
-; CHECK-NEXT: add r10, r11
+; CHECK-NEXT: add r1, r11
; CHECK-NEXT: vadd.f32 s0, s0, s1
; CHECK-NEXT: vadd.f32 s8, s8, s10
; CHECK-NEXT: vadd.f32 s4, s4, s6
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vstr s8, [r0]
-; CHECK-NEXT: add.w r0, r1, r5, lsl #2
-; CHECK-NEXT: adds r5, #3
+; CHECK-NEXT: add.w r0, r2, r5, lsl #2
; CHECK-NEXT: vstr s4, [r0]
-; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
-; CHECK-NEXT: add.w r0, r1, r0, lsl #2
+; CHECK-NEXT: adds r0, r5, #2
+; CHECK-NEXT: adds r5, #3
+; CHECK-NEXT: add.w r0, r2, r0, lsl #2
; CHECK-NEXT: vstr s0, [r0]
-; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: cmp r5, r0
; CHECK-NEXT: blo .LBB2_2
; CHECK-NEXT: .LBB2_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -394,15 +388,15 @@ define void @DCT_mve4(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #40
-; CHECK-NEXT: sub sp, #40
-; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT: .pad #24
+; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
-; CHECK-NEXT: str r2, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: subs r1, #4
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
-; CHECK-NEXT: blo.w .LBB3_5
+; CHECK-NEXT: blo .LBB3_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
; CHECK-NEXT: ldr r2, [r0, #8]
; CHECK-NEXT: movs r6, #1
@@ -410,34 +404,28 @@ define void @DCT_mve4(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: add.w r0, r2, r2, lsl #1
; CHECK-NEXT: add.w r12, r1, r2, lsl #2
; CHECK-NEXT: add.w r8, r1, r2, lsl #3
-; CHECK-NEXT: add.w r9, r1, r2, lsl #4
-; CHECK-NEXT: add.w r11, r1, r0, lsl #2
+; CHECK-NEXT: add.w r10, r1, r2, lsl #4
+; CHECK-NEXT: add.w r9, r1, r0, lsl #2
; CHECK-NEXT: adds r0, r2, #3
; CHECK-NEXT: bic r0, r0, #3
; CHECK-NEXT: subs r0, #4
; CHECK-NEXT: add.w r0, r6, r0, lsr #2
-; CHECK-NEXT: strd r0, r2, [sp, #8] @ 8-byte Folded Spill
+; CHECK-NEXT: strd r0, r2, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT: lsls r0, r2, #4
-; CHECK-NEXT: ldrd r2, r7, [sp, #8] @ 8-byte Folded Reload
-; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: ldrd r2, r7, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB3_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB3_3 Depth 2
-; CHECK-NEXT: adds r0, r6, #3
-; CHECK-NEXT: str r0, [sp, #36] @ 4-byte Spill
-; CHECK-NEXT: adds r0, r6, #2
-; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
; CHECK-NEXT: vmov.i32 q0, #0x0
-; CHECK-NEXT: str r0, [sp, #32] @ 4-byte Spill
-; CHECK-NEXT: adds r0, r6, #1
-; CHECK-NEXT: str r0, [sp, #28] @ 4-byte Spill
; CHECK-NEXT: mov r3, r12
; CHECK-NEXT: mov r0, r8
-; CHECK-NEXT: mov r5, r11
-; CHECK-NEXT: mov r4, r9
-; CHECK-NEXT: vmov q1, q0
-; CHECK-NEXT: vmov q2, q0
-; CHECK-NEXT: vmov q3, q0
+; CHECK-NEXT: mov r5, r9
+; CHECK-NEXT: mov r4, r10
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: dlstp.32 lr, r7
; CHECK-NEXT: .LBB3_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB3_2 Depth=1
@@ -455,9 +443,9 @@ define void @DCT_mve4(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB3_2 Depth=1
; CHECK-NEXT: vadd.f32 s14, s14, s15
-; CHECK-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: vadd.f32 s12, s12, s13
-; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: adds r0, r6, #1
; CHECK-NEXT: vadd.f32 s10, s10, s11
; CHECK-NEXT: vadd.f32 s8, s8, s9
; CHECK-NEXT: add.w r0, r1, r0, lsl #2
@@ -471,24 +459,24 @@ define void @DCT_mve4(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vstr s12, [r0]
; CHECK-NEXT: add.w r0, r1, r6, lsl #2
-; CHECK-NEXT: adds r6, #4
; CHECK-NEXT: vstr s8, [r0]
-; CHECK-NEXT: ldr r0, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT: adds r0, r6, #2
; CHECK-NEXT: add.w r0, r1, r0, lsl #2
; CHECK-NEXT: vstr s4, [r0]
-; CHECK-NEXT: ldr r0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT: adds r0, r6, #3
+; CHECK-NEXT: adds r6, #4
; CHECK-NEXT: add.w r0, r1, r0, lsl #2
; CHECK-NEXT: vstr s0, [r0]
-; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload
; CHECK-NEXT: add r12, r0
; CHECK-NEXT: add r8, r0
-; CHECK-NEXT: add r11, r0
; CHECK-NEXT: add r9, r0
-; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: add r10, r0
+; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
; CHECK-NEXT: cmp r6, r0
; CHECK-NEXT: blo .LBB3_2
; CHECK-NEXT: .LBB3_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #40
+; CHECK-NEXT: add sp, #24
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -588,60 +576,53 @@ define void @DCT_mve5(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
; CHECK-NEXT: subs r1, #5
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
; CHECK-NEXT: blo.w .LBB4_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
-; CHECK-NEXT: ldr r3, [r0, #8]
+; CHECK-NEXT: ldr.w r12, [r0, #8]
; CHECK-NEXT: ldr r1, [r0]
-; CHECK-NEXT: adds r0, r3, #3
-; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: add.w r0, r12, #3
; CHECK-NEXT: bic r0, r0, #3
-; CHECK-NEXT: add.w r8, r1, r3, lsl #2
+; CHECK-NEXT: add.w r8, r1, r12, lsl #2
; CHECK-NEXT: subs r1, r0, #4
; CHECK-NEXT: movs r0, #1
-; CHECK-NEXT: lsls r5, r3, #2
+; CHECK-NEXT: lsl.w r5, r12, #2
; CHECK-NEXT: add.w r1, r0, r1, lsr #2
-; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: add.w r1, r3, r3, lsl #2
-; CHECK-NEXT: lsls r1, r1, #2
; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: add.w r1, r12, r12, lsl #2
+; CHECK-NEXT: lsls r1, r1, #2
+; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB4_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB4_3 Depth 2
-; CHECK-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds r1, r0, #4
-; CHECK-NEXT: ldr r4, [sp, #20] @ 4-byte Reload
-; CHECK-NEXT: vmov.i32 q1, #0x0
-; CHECK-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #3
-; CHECK-NEXT: add.w r10, r0, #2
+; CHECK-NEXT: ldr r4, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: add.w r11, r0, #1
-; CHECK-NEXT: ldr r6, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vmov.i32 q1, #0x0
; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
-; CHECK-NEXT: vmov q0, q1
-; CHECK-NEXT: vmov q3, q1
-; CHECK-NEXT: vmov q2, q1
-; CHECK-NEXT: vmov q4, q1
-; CHECK-NEXT: dlstp.32 lr, r7
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: dlstp.32 lr, r12
; CHECK-NEXT: .LBB4_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB4_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: add.w r9, r3, r5
; CHECK-NEXT: vldrw.u32 q5, [r4], #16
; CHECK-NEXT: vldrw.u32 q6, [r3], #16
-; CHECK-NEXT: add.w r12, r9, r5
+; CHECK-NEXT: add.w r10, r9, r5
; CHECK-NEXT: vfma.f32 q3, q6, q5
; CHECK-NEXT: vldrw.u32 q6, [r9]
-; CHECK-NEXT: add.w r6, r12, r5
+; CHECK-NEXT: add.w r6, r10, r5
; CHECK-NEXT: vfma.f32 q4, q6, q5
-; CHECK-NEXT: vldrw.u32 q6, [r12]
+; CHECK-NEXT: vldrw.u32 q6, [r10]
; CHECK-NEXT: adds r7, r6, r5
; CHECK-NEXT: vfma.f32 q2, q6, q5
; CHECK-NEXT: vldrw.u32 q6, [r6]
@@ -662,30 +643,31 @@ define void @DCT_mve5(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vadd.f32 s8, s8, s9
; CHECK-NEXT: vadd.f32 s0, s0, s1
; CHECK-NEXT: vadd.f32 s1, s16, s18
-; CHECK-NEXT: vadd.f32 s2, s2, s3
; CHECK-NEXT: vadd.f32 s12, s12, s14
+; CHECK-NEXT: vadd.f32 s2, s2, s3
; CHECK-NEXT: vadd.f32 s4, s4, s6
; CHECK-NEXT: vadd.f32 s6, s8, s10
; CHECK-NEXT: vstr s1, [r1]
; CHECK-NEXT: add.w r1, r2, r0, lsl #2
-; CHECK-NEXT: vadd.f32 s0, s0, s2
-; CHECK-NEXT: adds r0, #5
; CHECK-NEXT: vstr s12, [r1]
-; CHECK-NEXT: add.w r1, r2, r10, lsl #2
+; CHECK-NEXT: adds r1, r0, #2
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s6, [r1]
-; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #3
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s0, [r1]
-; CHECK-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #4
+; CHECK-NEXT: adds r0, #5
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s4, [r1]
-; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT: add r8, r1
-; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: blo.w .LBB4_2
+; CHECK-NEXT: blo .LBB4_2
; CHECK-NEXT: .LBB4_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -797,63 +779,54 @@ define void @DCT_mve6(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
; CHECK-NEXT: subs r1, #6
-; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
; CHECK-NEXT: blo.w .LBB5_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
-; CHECK-NEXT: ldr r3, [r0, #8]
+; CHECK-NEXT: ldr.w r12, [r0, #8]
; CHECK-NEXT: ldr r1, [r0]
-; CHECK-NEXT: adds r0, r3, #3
-; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: add.w r0, r12, #3
; CHECK-NEXT: bic r0, r0, #3
-; CHECK-NEXT: add.w r8, r1, r3, lsl #2
+; CHECK-NEXT: add.w r8, r1, r12, lsl #2
; CHECK-NEXT: subs r1, r0, #4
; CHECK-NEXT: movs r0, #1
-; CHECK-NEXT: lsls r5, r3, #2
+; CHECK-NEXT: lsl.w r5, r12, #2
; CHECK-NEXT: add.w r1, r0, r1, lsr #2
; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: add.w r1, r3, r3, lsl #1
+; CHECK-NEXT: add.w r1, r12, r12, lsl #1
; CHECK-NEXT: lsls r1, r1, #3
; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB5_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB5_3 Depth 2
-; CHECK-NEXT: adds r1, r0, #5
-; CHECK-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #4
-; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #3
-; CHECK-NEXT: ldr r7, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
-; CHECK-NEXT: vmov.i32 q1, #0x0
-; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
-; CHECK-NEXT: add.w r11, r0, #2
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: adds r4, r0, #1
-; CHECK-NEXT: ldr r6, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vmov.i32 q1, #0x0
; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: vmov q3, q1
-; CHECK-NEXT: vmov q4, q1
-; CHECK-NEXT: vmov q0, q1
-; CHECK-NEXT: vmov q5, q1
-; CHECK-NEXT: vmov q2, q1
-; CHECK-NEXT: dlstp.32 lr, r7
+; CHECK-NEXT: vmov.i32 q3, #0x0
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: dlstp.32 lr, r12
; CHECK-NEXT: .LBB5_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB5_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: add.w r12, r3, r5
+; CHECK-NEXT: add.w r10, r3, r5
; CHECK-NEXT: vldrw.u32 q6, [r1], #16
; CHECK-NEXT: vldrw.u32 q7, [r3], #16
-; CHECK-NEXT: add.w r10, r12, r5
+; CHECK-NEXT: add.w r11, r10, r5
; CHECK-NEXT: vfma.f32 q4, q7, q6
-; CHECK-NEXT: vldrw.u32 q7, [r12]
-; CHECK-NEXT: add.w r6, r10, r5
-; CHECK-NEXT: vfma.f32 q5, q7, q6
; CHECK-NEXT: vldrw.u32 q7, [r10]
+; CHECK-NEXT: add.w r6, r11, r5
+; CHECK-NEXT: vfma.f32 q5, q7, q6
+; CHECK-NEXT: vldrw.u32 q7, [r11]
; CHECK-NEXT: adds r7, r6, r5
; CHECK-NEXT: vfma.f32 q2, q7, q6
; CHECK-NEXT: vldrw.u32 q7, [r6]
@@ -885,28 +858,29 @@ define void @DCT_mve6(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vstr s1, [r1]
; CHECK-NEXT: add.w r1, r2, r0, lsl #2
; CHECK-NEXT: vadd.f32 s0, s0, s2
-; CHECK-NEXT: adds r0, #6
; CHECK-NEXT: vstr s3, [r1]
-; CHECK-NEXT: add.w r1, r2, r11, lsl #2
+; CHECK-NEXT: adds r1, r0, #2
; CHECK-NEXT: vadd.f32 s4, s4, s6
+; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s8, [r1]
-; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #3
; CHECK-NEXT: vadd.f32 s6, s12, s14
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s0, [r1]
-; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #4
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s6, [r1]
-; CHECK-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #5
+; CHECK-NEXT: adds r0, #6
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s4, [r1]
; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT: add r8, r1
-; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: cmp r0, r1
; CHECK-NEXT: blo.w .LBB5_2
; CHECK-NEXT: .LBB5_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -1030,73 +1004,64 @@ define void @DCT_mve7(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #72
-; CHECK-NEXT: sub sp, #72
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: .pad #48
+; CHECK-NEXT: sub sp, #48
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
; CHECK-NEXT: subs r1, #7
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
; CHECK-NEXT: blo.w .LBB6_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
-; CHECK-NEXT: ldr r3, [r0, #8]
+; CHECK-NEXT: ldr.w r10, [r0, #8]
; CHECK-NEXT: ldr r1, [r0]
-; CHECK-NEXT: adds r0, r3, #3
-; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: add.w r0, r10, #3
; CHECK-NEXT: bic r0, r0, #3
-; CHECK-NEXT: add.w r9, r1, r3, lsl #2
+; CHECK-NEXT: add.w r9, r1, r10, lsl #2
; CHECK-NEXT: subs r1, r0, #4
; CHECK-NEXT: movs r0, #1
-; CHECK-NEXT: lsls r5, r3, #2
+; CHECK-NEXT: lsl.w r5, r10, #2
; CHECK-NEXT: add.w r1, r0, r1, lsr #2
-; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: rsb r1, r3, r3, lsl #3
-; CHECK-NEXT: lsls r1, r1, #2
; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: rsb r1, r10, r10, lsl #3
+; CHECK-NEXT: lsls r1, r1, #2
+; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB6_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB6_3 Depth 2
-; CHECK-NEXT: adds r1, r0, #6
-; CHECK-NEXT: str r1, [sp, #36] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #5
-; CHECK-NEXT: str r1, [sp, #32] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #4
-; CHECK-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #3
-; CHECK-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
-; CHECK-NEXT: vmov.i32 q2, #0x0
-; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
; CHECK-NEXT: adds r4, r0, #2
; CHECK-NEXT: add.w r8, r0, #1
-; CHECK-NEXT: ldr r6, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: mov r3, r9
-; CHECK-NEXT: vmov q4, q2
-; CHECK-NEXT: vmov q5, q2
-; CHECK-NEXT: vmov q3, q2
-; CHECK-NEXT: vmov q6, q2
-; CHECK-NEXT: vmov q1, q2
-; CHECK-NEXT: mov r12, r7
-; CHECK-NEXT: vstrw.32 q2, [sp, #56] @ 16-byte Spill
-; CHECK-NEXT: dls lr, r6
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
+; CHECK-NEXT: vmov.i32 q6, #0x0
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: mov r12, r10
+; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: dls lr, r7
; CHECK-NEXT: .LBB6_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB6_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: vctp.32 r12
-; CHECK-NEXT: add.w r10, r3, r5
+; CHECK-NEXT: add.w r11, r3, r5
; CHECK-NEXT: vpstt
; CHECK-NEXT: vldrwt.u32 q7, [r1], #16
; CHECK-NEXT: vldrwt.u32 q0, [r3], #16
-; CHECK-NEXT: add.w r11, r10, r5
+; CHECK-NEXT: add.w r6, r11, r5
; CHECK-NEXT: sub.w r12, r12, #4
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q5, q0, q7
-; CHECK-NEXT: vldrwt.u32 q0, [r10]
-; CHECK-NEXT: add.w r6, r11, r5
+; CHECK-NEXT: vldrwt.u32 q0, [r11]
+; CHECK-NEXT: adds r7, r6, r5
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q6, q0, q7
-; CHECK-NEXT: vldrwt.u32 q0, [r11]
-; CHECK-NEXT: vstrw.32 q6, [sp, #40] @ 16-byte Spill
+; CHECK-NEXT: vldrwt.u32 q0, [r6]
+; CHECK-NEXT: vstrw.32 q6, [sp, #16] @ 16-byte Spill
; CHECK-NEXT: vmov q6, q5
; CHECK-NEXT: vpst
; CHECK-NEXT: vfmat.f32 q1, q0, q7
@@ -1104,26 +1069,26 @@ define void @DCT_mve7(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: vmov q3, q1
; CHECK-NEXT: vpst
-; CHECK-NEXT: vldrwt.u32 q0, [r6]
-; CHECK-NEXT: vldrw.u32 q1, [sp, #56] @ 16-byte Reload
-; CHECK-NEXT: adds r7, r6, r5
-; CHECK-NEXT: vpstt
-; CHECK-NEXT: vfmat.f32 q1, q0, q7
; CHECK-NEXT: vldrwt.u32 q0, [r7]
+; CHECK-NEXT: vldrw.u32 q1, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: adds r6, r7, r5
-; CHECK-NEXT: vstrw.32 q1, [sp, #56] @ 16-byte Spill
+; CHECK-NEXT: vpstt
+; CHECK-NEXT: vfmat.f32 q1, q0, q7
+; CHECK-NEXT: vldrwt.u32 q0, [r6]
+; CHECK-NEXT: adds r7, r6, r5
+; CHECK-NEXT: vstrw.32 q1, [sp, #32] @ 16-byte Spill
; CHECK-NEXT: vmov q1, q3
; CHECK-NEXT: vmov q3, q4
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q3, q0, q7
-; CHECK-NEXT: vldrwt.u32 q0, [r6]
+; CHECK-NEXT: vldrwt.u32 q0, [r7]
; CHECK-NEXT: vmov q4, q5
-; CHECK-NEXT: adds r7, r6, r5
+; CHECK-NEXT: adds r6, r7, r5
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q4, q0, q7
-; CHECK-NEXT: vldrwt.u32 q0, [r7]
+; CHECK-NEXT: vldrwt.u32 q0, [r6]
; CHECK-NEXT: vmov q5, q6
-; CHECK-NEXT: vldrw.u32 q6, [sp, #40] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: vpst
; CHECK-NEXT: vfmat.f32 q2, q0, q7
; CHECK-NEXT: le lr, .LBB6_3
@@ -1138,45 +1103,45 @@ define void @DCT_mve7(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vadd.f32 s4, s4, s5
; CHECK-NEXT: vadd.f32 s10, s10, s11
; CHECK-NEXT: vadd.f32 s8, s8, s9
-; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vadd.f32 s9, s18, s19
; CHECK-NEXT: vadd.f32 s11, s16, s17
-; CHECK-NEXT: vldrw.u32 q4, [sp, #56] @ 16-byte Reload
-; CHECK-NEXT: vadd.f32 s2, s3, s1
+; CHECK-NEXT: vldrw.u32 q4, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vadd.f32 s5, s18, s19
; CHECK-NEXT: vadd.f32 s7, s16, s17
+; CHECK-NEXT: vadd.f32 s2, s3, s1
; CHECK-NEXT: vadd.f32 s4, s4, s6
-; CHECK-NEXT: vstr s0, [r1]
-; CHECK-NEXT: add.w r1, r2, r0, lsl #2
; CHECK-NEXT: vadd.f32 s14, s14, s15
-; CHECK-NEXT: adds r0, #7
; CHECK-NEXT: vadd.f32 s12, s12, s13
-; CHECK-NEXT: vstr s2, [r1]
-; CHECK-NEXT: add.w r1, r2, r4, lsl #2
+; CHECK-NEXT: vstr s0, [r1]
+; CHECK-NEXT: add.w r1, r2, r0, lsl #2
; CHECK-NEXT: vadd.f32 s8, s8, s10
; CHECK-NEXT: vadd.f32 s6, s7, s5
-; CHECK-NEXT: vstr s4, [r1]
+; CHECK-NEXT: vstr s2, [r1]
+; CHECK-NEXT: add.w r1, r2, r4, lsl #2
; CHECK-NEXT: vadd.f32 s10, s11, s9
-; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: vstr s4, [r1]
+; CHECK-NEXT: adds r1, r0, #3
; CHECK-NEXT: vadd.f32 s12, s12, s14
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s6, [r1]
-; CHECK-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #4
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s12, [r1]
-; CHECK-NEXT: ldr r1, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #5
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s10, [r1]
-; CHECK-NEXT: ldr r1, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #6
+; CHECK-NEXT: adds r0, #7
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s8, [r1]
-; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT: add r9, r1
-; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: cmp r0, r1
; CHECK-NEXT: blo.w .LBB6_2
; CHECK-NEXT: .LBB6_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #72
+; CHECK-NEXT: add sp, #48
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -1312,107 +1277,99 @@ define void @DCT_mve8(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #88
-; CHECK-NEXT: sub sp, #88
-; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: .pad #64
+; CHECK-NEXT: sub sp, #64
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
; CHECK-NEXT: ldr r1, [r0, #4]
; CHECK-NEXT: subs r1, #8
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
; CHECK-NEXT: cmp r1, #2
; CHECK-NEXT: blo.w .LBB7_5
; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
-; CHECK-NEXT: ldr r3, [r0, #8]
+; CHECK-NEXT: ldr.w r11, [r0, #8]
; CHECK-NEXT: ldr r1, [r0]
-; CHECK-NEXT: adds r0, r3, #3
-; CHECK-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: add.w r0, r11, #3
; CHECK-NEXT: bic r0, r0, #3
-; CHECK-NEXT: add.w r12, r1, r3, lsl #2
+; CHECK-NEXT: add.w r12, r1, r11, lsl #2
; CHECK-NEXT: subs r1, r0, #4
; CHECK-NEXT: movs r0, #1
-; CHECK-NEXT: lsls r6, r3, #2
+; CHECK-NEXT: lsl.w r6, r11, #2
; CHECK-NEXT: add.w r1, r0, r1, lsr #2
-; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: lsls r1, r3, #5
; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: lsl.w r1, r11, #5
+; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB7_2: @ %for.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB7_3 Depth 2
-; CHECK-NEXT: adds r1, r0, #7
-; CHECK-NEXT: str r1, [sp, #36] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #6
-; CHECK-NEXT: str r1, [sp, #32] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #5
-; CHECK-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; CHECK-NEXT: adds r1, r0, #4
-; CHECK-NEXT: ldr.w r9, [sp, #20] @ 4-byte Reload
-; CHECK-NEXT: vmov.i32 q3, #0x0
-; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT: ldr.w r9, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: adds r4, r0, #3
; CHECK-NEXT: add.w r8, r0, #2
; CHECK-NEXT: adds r1, r0, #1
-; CHECK-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: ldr r7, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: mov r3, r12
-; CHECK-NEXT: vmov q5, q3
-; CHECK-NEXT: vmov q6, q3
-; CHECK-NEXT: vmov q4, q3
-; CHECK-NEXT: vmov q7, q3
-; CHECK-NEXT: vmov q2, q3
-; CHECK-NEXT: mov r10, r7
-; CHECK-NEXT: vstrw.32 q3, [sp, #56] @ 16-byte Spill
-; CHECK-NEXT: vstrw.32 q3, [sp, #72] @ 16-byte Spill
-; CHECK-NEXT: dls lr, r5
+; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vmov.i32 q6, #0x0
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: vmov.i32 q7, #0x0
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: mov r10, r11
+; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: dls lr, r7
; CHECK-NEXT: .LBB7_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB7_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: vctp.32 r10
-; CHECK-NEXT: add.w r11, r3, r6
+; CHECK-NEXT: adds r5, r3, r6
; CHECK-NEXT: vpstt
; CHECK-NEXT: vldrwt.u32 q0, [r9], #16
; CHECK-NEXT: vldrwt.u32 q1, [r3], #16
-; CHECK-NEXT: add.w r5, r11, r6
+; CHECK-NEXT: adds r7, r5, r6
; CHECK-NEXT: sub.w r10, r10, #4
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q6, q1, q0
-; CHECK-NEXT: vldrwt.u32 q1, [r11]
-; CHECK-NEXT: vstrw.32 q6, [sp, #40] @ 16-byte Spill
+; CHECK-NEXT: vldrwt.u32 q1, [r5]
+; CHECK-NEXT: vstrw.32 q6, [sp, #16] @ 16-byte Spill
; CHECK-NEXT: vmov q6, q5
; CHECK-NEXT: vpst
; CHECK-NEXT: vfmat.f32 q7, q1, q0
-; CHECK-NEXT: vmov q5, q3
-; CHECK-NEXT: vmov q3, q4
-; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vmov q5, q4
+; CHECK-NEXT: vmov q4, q3
+; CHECK-NEXT: vmov q3, q2
; CHECK-NEXT: vpst
-; CHECK-NEXT: vldrwt.u32 q1, [r5]
-; CHECK-NEXT: vldrw.u32 q2, [sp, #56] @ 16-byte Reload
-; CHECK-NEXT: adds r7, r5, r6
-; CHECK-NEXT: vpstt
-; CHECK-NEXT: vfmat.f32 q2, q1, q0
; CHECK-NEXT: vldrwt.u32 q1, [r7]
-; CHECK-NEXT: vstrw.32 q2, [sp, #56] @ 16-byte Spill
-; CHECK-NEXT: vldrw.u32 q2, [sp, #72] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q2, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: adds r5, r7, r6
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q2, q1, q0
; CHECK-NEXT: vldrwt.u32 q1, [r5]
+; CHECK-NEXT: vstrw.32 q2, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q2, [sp, #48] @ 16-byte Reload
; CHECK-NEXT: adds r7, r5, r6
-; CHECK-NEXT: vstrw.32 q2, [sp, #72] @ 16-byte Spill
-; CHECK-NEXT: vmov q2, q4
-; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q2, q1, q0
; CHECK-NEXT: vldrwt.u32 q1, [r7]
; CHECK-NEXT: adds r5, r7, r6
-; CHECK-NEXT: vmov q3, q5
+; CHECK-NEXT: vstrw.32 q2, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: vmov q2, q3
+; CHECK-NEXT: vmov q3, q4
; CHECK-NEXT: vpstt
-; CHECK-NEXT: vfmat.f32 q4, q1, q0
+; CHECK-NEXT: vfmat.f32 q2, q1, q0
; CHECK-NEXT: vldrwt.u32 q1, [r5]
+; CHECK-NEXT: vmov q4, q5
+; CHECK-NEXT: adds r7, r5, r6
+; CHECK-NEXT: vpstt
+; CHECK-NEXT: vfmat.f32 q4, q1, q0
+; CHECK-NEXT: vldrwt.u32 q1, [r7]
; CHECK-NEXT: vmov q5, q6
-; CHECK-NEXT: add r5, r6
+; CHECK-NEXT: adds r5, r7, r6
; CHECK-NEXT: vpstt
; CHECK-NEXT: vfmat.f32 q5, q1, q0
; CHECK-NEXT: vldrwt.u32 q1, [r5]
-; CHECK-NEXT: vldrw.u32 q6, [sp, #40] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: vpst
; CHECK-NEXT: vfmat.f32 q3, q1, q0
; CHECK-NEXT: le lr, .LBB7_3
@@ -1425,12 +1382,12 @@ define void @DCT_mve8(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vadd.f32 s6, s24, s25
; CHECK-NEXT: vadd.f32 s5, s18, s19
; CHECK-NEXT: vadd.f32 s7, s16, s17
-; CHECK-NEXT: vldrw.u32 q4, [sp, #56] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q4, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: vadd.f32 s10, s10, s11
; CHECK-NEXT: vadd.f32 s8, s8, s9
; CHECK-NEXT: vadd.f32 s9, s18, s19
; CHECK-NEXT: vadd.f32 s11, s16, s17
-; CHECK-NEXT: vldrw.u32 q4, [sp, #72] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q4, [sp, #48] @ 16-byte Reload
; CHECK-NEXT: vadd.f32 s14, s14, s15
; CHECK-NEXT: vadd.f32 s12, s12, s13
; CHECK-NEXT: vadd.f32 s13, s18, s19
@@ -1445,33 +1402,33 @@ define void @DCT_mve8(ptr nocapture readonly %S, ptr nocapture readonly %pIn, pt
; CHECK-NEXT: vstr s0, [r1]
; CHECK-NEXT: add.w r1, r2, r0, lsl #2
; CHECK-NEXT: vadd.f32 s3, s20, s21
-; CHECK-NEXT: adds r0, #8
; CHECK-NEXT: vstr s2, [r1]
; CHECK-NEXT: add.w r1, r2, r8, lsl #2
; CHECK-NEXT: vadd.f32 s12, s7, s5
; CHECK-NEXT: vstr s10, [r1]
; CHECK-NEXT: add.w r1, r2, r4, lsl #2
; CHECK-NEXT: vstr s14, [r1]
-; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
-; CHECK-NEXT: vadd.f32 s4, s3, s1
+; CHECK-NEXT: adds r1, r0, #4
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
+; CHECK-NEXT: vadd.f32 s4, s3, s1
; CHECK-NEXT: vstr s8, [r1]
-; CHECK-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #5
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s12, [r1]
-; CHECK-NEXT: ldr r1, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #6
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s4, [r1]
-; CHECK-NEXT: ldr r1, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT: adds r1, r0, #7
+; CHECK-NEXT: adds r0, #8
; CHECK-NEXT: add.w r1, r2, r1, lsl #2
; CHECK-NEXT: vstr s6, [r1]
-; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT: add r12, r1
-; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
; CHECK-NEXT: cmp r0, r1
; CHECK-NEXT: blo.w .LBB7_2
; CHECK-NEXT: .LBB7_5: @ %for.cond.cleanup
-; CHECK-NEXT: add sp, #88
+; CHECK-NEXT: add sp, #64
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
diff --git a/llvm/test/CodeGen/Thumb2/mve-qrintrsplat.ll b/llvm/test/CodeGen/Thumb2/mve-qrintrsplat.ll
index 29c4fb9..413c4a1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-qrintrsplat.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-qrintrsplat.ll
@@ -1496,15 +1496,14 @@ define void @vfmasq(ptr %x, ptr %y, i32 %n) {
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
; CHECK-NEXT: .LBB34_1: @ %for.body.preheader
-; CHECK-NEXT: vmov.f32 q0, #1.000000e+01
; CHECK-NEXT: dlstp.32 lr, r2
; CHECK-NEXT: .LBB34_2: @ %for.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vldrw.u32 q1, [r1]
-; CHECK-NEXT: vldrw.u32 q2, [r0], #16
-; CHECK-NEXT: vfma.f32 q3, q2, q1
-; CHECK-NEXT: vstrw.32 q3, [r1], #16
+; CHECK-NEXT: vmov.f32 q2, #1.000000e+01
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: vldrw.u32 q1, [r0], #16
+; CHECK-NEXT: vfma.f32 q2, q1, q0
+; CHECK-NEXT: vstrw.32 q2, [r1], #16
; CHECK-NEXT: letp lr, .LBB34_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
@@ -1542,15 +1541,14 @@ define void @vfmas(ptr %s1, ptr %s2, i32 %N) {
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
; CHECK-NEXT: .LBB35_1: @ %while.body.lr.ph
-; CHECK-NEXT: vmov.f32 q0, #1.000000e+01
; CHECK-NEXT: dlstp.32 lr, r2
; CHECK-NEXT: .LBB35_2: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vldrw.u32 q1, [r1]
-; CHECK-NEXT: vldrw.u32 q2, [r0]
-; CHECK-NEXT: vfma.f32 q3, q2, q1
-; CHECK-NEXT: vstrw.32 q3, [r0], #16
+; CHECK-NEXT: vmov.f32 q2, #1.000000e+01
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: vldrw.u32 q1, [r0]
+; CHECK-NEXT: vfma.f32 q2, q1, q0
+; CHECK-NEXT: vstrw.32 q2, [r0], #16
; CHECK-NEXT: letp lr, .LBB35_2
; CHECK-NEXT: @ %bb.3: @ %while.end
; CHECK-NEXT: pop {r7, pc}
diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
index e845070..62482c1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
@@ -287,17 +287,17 @@ define void @shlor(ptr nocapture readonly %x, ptr noalias nocapture %y, i32 %n)
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: blt .LBB5_3
; CHECK-NEXT: @ %bb.1: @ %vector.ph
-; CHECK-NEXT: adr.w lr, .LCPI5_0
-; CHECK-NEXT: adr r4, .LCPI5_1
+; CHECK-NEXT: adr r4, .LCPI5_0
+; CHECK-NEXT: adr r3, .LCPI5_1
; CHECK-NEXT: adr r5, .LCPI5_2
; CHECK-NEXT: adr r6, .LCPI5_3
-; CHECK-NEXT: vldrw.u32 q2, [r4]
+; CHECK-NEXT: vldrw.u32 q2, [r3]
+; CHECK-NEXT: vldrw.u32 q3, [r4]
; CHECK-NEXT: vldrw.u32 q0, [r6]
; CHECK-NEXT: vldrw.u32 q1, [r5]
-; CHECK-NEXT: vldrw.u32 q3, [lr]
+; CHECK-NEXT: vadd.i32 q2, q2, r1
; CHECK-NEXT: vadd.i32 q0, q0, r1
; CHECK-NEXT: vadd.i32 q1, q1, r1
-; CHECK-NEXT: vadd.i32 q2, q2, r1
; CHECK-NEXT: vadd.i32 q3, q3, r1
; CHECK-NEXT: mov.w r12, #1
; CHECK-NEXT: movs r4, #3
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
index f9948db..c92c2be 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
@@ -656,14 +656,12 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %b) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: vmov q2, q0
; CHECK-NEXT: vcmp.i8 eq, q1, zr
; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q1, #0xff
; CHECK-NEXT: vpsel q5, q1, q0
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.u8 r0, q5[0]
; CHECK-NEXT: vmov.16 q3[0], r0
; CHECK-NEXT: vmov.u8 r0, q5[1]
@@ -706,7 +704,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %b) {
; CHECK-NEXT: orrs r1, r3
; CHECK-NEXT: add r0, r2
; CHECK-NEXT: vmov r2, r3, d15
-; CHECK-NEXT: vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
; CHECK-NEXT: vmov.u8 r2, q2[3]
@@ -785,6 +783,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %b) {
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
; CHECK-NEXT: vpsel q6, q1, q7
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: vmov r2, r3, d12
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
@@ -853,7 +852,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %b) {
; CHECK-NEXT: vmov r2, r3, d1
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: bx lr
entry:
@@ -2065,14 +2063,12 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %b
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: vmov q2, q0
; CHECK-NEXT: vcmp.i8 eq, q1, zr
; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q1, #0xff
; CHECK-NEXT: vpsel q5, q1, q0
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.u8 r2, q5[0]
; CHECK-NEXT: vmov.16 q3[0], r2
; CHECK-NEXT: vmov.u8 r2, q5[1]
@@ -2115,7 +2111,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %b
; CHECK-NEXT: orr.w lr, lr, r3
; CHECK-NEXT: add r12, r2
; CHECK-NEXT: vmov r3, r2, d15
-; CHECK-NEXT: vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: vmov q0[2], q0[0], r3, r2
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vmov.u8 r2, q2[3]
@@ -2194,6 +2190,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %b
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
; CHECK-NEXT: vpsel q6, q1, q7
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: vmov r2, r3, d12
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
@@ -2264,7 +2261,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %b
; CHECK-NEXT: adc.w r3, r3, lr
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: pop {r7, pc}
entry:
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
index 63b1431..9f55183 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
@@ -817,16 +817,14 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vpsel q6, q2, q0
-; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vmov.u8 r0, q6[0]
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov.16 q0[0], r0
; CHECK-NEXT: vmov.u8 r0, q6[1]
; CHECK-NEXT: vmov.16 q0[1], r0
@@ -842,9 +840,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.16 q0[6], r0
; CHECK-NEXT: vmov.u8 r0, q6[7]
; CHECK-NEXT: vmov.16 q0[7], r0
-; CHECK-NEXT: vstrw.32 q2, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT: vcmp.i16 ne, q0, zr
; CHECK-NEXT: vmov.u8 r2, q3[0]
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
; CHECK-NEXT: vpsel q7, q2, q4
; CHECK-NEXT: vmov.u16 r0, q7[2]
; CHECK-NEXT: vmov.u16 r1, q7[0]
@@ -895,7 +892,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov r2, s2
; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: vmov r1, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0xff
; CHECK-NEXT: vmov r3, s0
; CHECK-NEXT: umull r0, r2, r0, r2
; CHECK-NEXT: umull r1, r3, r1, r3
@@ -916,8 +913,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.u16 r3, q7[5]
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vpsel q0, q0, q4
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q0, q4, q0
; CHECK-NEXT: vmov r2, r3, d0
; CHECK-NEXT: vmov q4[2], q4[0], r2, r3
; CHECK-NEXT: vmov q4[3], q4[1], r2, r3
@@ -932,7 +929,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov r2, s18
; CHECK-NEXT: vmov r0, s30
; CHECK-NEXT: vmov r1, s28
-; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0xff
; CHECK-NEXT: vmov r3, s16
; CHECK-NEXT: umull r0, r2, r0, r2
; CHECK-NEXT: umull r1, r3, r1, r3
@@ -960,7 +957,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov r2, s2
; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: vmov r1, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov r3, s0
; CHECK-NEXT: umull r0, r2, r0, r2
; CHECK-NEXT: umull r1, r3, r1, r3
@@ -1041,7 +1038,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov r2, s2
; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: vmov r1, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov r3, s0
; CHECK-NEXT: umull r0, r2, r0, r2
; CHECK-NEXT: umull r1, r3, r1, r3
@@ -1062,7 +1059,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.u16 r3, q6[5]
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q0, #0xff
; CHECK-NEXT: vpsel q0, q0, q4
; CHECK-NEXT: vmov r2, r3, d0
; CHECK-NEXT: vmov q4[2], q4[0], r2, r3
@@ -1117,7 +1114,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: adc.w r1, r1, lr
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -1137,16 +1133,14 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: vmov q3, q0
; CHECK-NEXT: vcmp.i8 eq, q2, zr
; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vpsel q5, q2, q0
-; CHECK-NEXT: vmov.s8 r2, q1[0]
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.u8 r0, q5[0]
-; CHECK-NEXT: vmov.s8 r3, q3[0]
+; CHECK-NEXT: vmov.s8 r2, q1[0]
; CHECK-NEXT: vmov.16 q4[0], r0
; CHECK-NEXT: vmov.u8 r0, q5[1]
; CHECK-NEXT: vmov.16 q4[1], r0
@@ -1162,9 +1156,9 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.16 q4[6], r0
; CHECK-NEXT: vmov.u8 r0, q5[7]
; CHECK-NEXT: vmov.16 q4[7], r0
-; CHECK-NEXT: smull r2, r3, r3, r2
+; CHECK-NEXT: vmov.s8 r3, q3[0]
; CHECK-NEXT: vcmp.i16 ne, q4, zr
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: smull r2, r3, r3, r2
; CHECK-NEXT: vpsel q6, q2, q0
; CHECK-NEXT: vmov.u16 r0, q6[2]
; CHECK-NEXT: vmov.u16 r1, q6[0]
@@ -1198,7 +1192,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.s8 r3, q3[3]
; CHECK-NEXT: smull r0, r1, r1, r0
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: smull r2, r3, r3, r2
; CHECK-NEXT: vmov q0[2], q0[0], r0, r2
; CHECK-NEXT: vmov q0[3], q0[1], r1, r3
@@ -1219,7 +1213,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: smull r0, r1, r1, r0
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vpsel q6, q2, q7
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q6, q2, q0
; CHECK-NEXT: vmov r2, r3, d12
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
@@ -1273,17 +1268,18 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: vmov.16 q6[7], r2
; CHECK-NEXT: vmov.s8 r0, q1[8]
; CHECK-NEXT: vcmp.i16 ne, q6, zr
+; CHECK-NEXT: vmov.i8 q6, #0x0
+; CHECK-NEXT: vpsel q5, q2, q6
; CHECK-NEXT: vmov.s8 r1, q3[8]
-; CHECK-NEXT: vpsel q5, q2, q7
-; CHECK-NEXT: smull r0, r1, r1, r0
; CHECK-NEXT: vmov.u16 r2, q5[2]
; CHECK-NEXT: vmov.u16 r3, q5[0]
; CHECK-NEXT: vmov q0[2], q0[0], r3, r2
; CHECK-NEXT: vmov.u16 r2, q5[3]
; CHECK-NEXT: vmov.u16 r3, q5[1]
+; CHECK-NEXT: smull r0, r1, r1, r0
; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vpsel q6, q2, q7
+; CHECK-NEXT: vpsel q6, q2, q6
; CHECK-NEXT: vmov r2, r3, d12
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
@@ -1365,7 +1361,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: adc.w r1, r1, lr
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -2296,16 +2291,14 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vpsel q6, q2, q0
-; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vmov.u8 r2, q6[0]
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov.16 q0[0], r2
; CHECK-NEXT: vmov.u8 r2, q6[1]
; CHECK-NEXT: vmov.16 q0[1], r2
@@ -2321,9 +2314,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov.16 q0[6], r2
; CHECK-NEXT: vmov.u8 r2, q6[7]
; CHECK-NEXT: vmov.16 q0[7], r2
-; CHECK-NEXT: vstrw.32 q2, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT: vcmp.i16 ne, q0, zr
; CHECK-NEXT: vmov.u8 r4, q3[2]
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
; CHECK-NEXT: vpsel q7, q2, q4
; CHECK-NEXT: vmov.u16 r2, q7[2]
; CHECK-NEXT: vmov.u16 r3, q7[0]
@@ -2374,7 +2366,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov r2, s2
; CHECK-NEXT: vmov r3, s18
; CHECK-NEXT: vmov r5, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0xff
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: umull r2, r3, r3, r2
; CHECK-NEXT: umull r4, r5, r5, r4
@@ -2395,8 +2387,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov.u16 r4, q7[5]
; CHECK-NEXT: vmov q0[3], q0[1], r4, r5
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vpsel q0, q0, q4
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q0, q4, q0
; CHECK-NEXT: vmov r5, r4, d0
; CHECK-NEXT: vmov q4[2], q4[0], r5, r4
; CHECK-NEXT: vmov q4[3], q4[1], r5, r4
@@ -2411,7 +2403,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov r5, s18
; CHECK-NEXT: vmov r2, s30
; CHECK-NEXT: vmov r3, s28
-; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0xff
; CHECK-NEXT: vmov r4, s16
; CHECK-NEXT: umull r2, r5, r2, r5
; CHECK-NEXT: umull r3, r4, r3, r4
@@ -2439,7 +2431,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov r5, s2
; CHECK-NEXT: vmov r2, s18
; CHECK-NEXT: vmov r3, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: umull r2, r5, r2, r5
; CHECK-NEXT: umull r3, r4, r3, r4
@@ -2520,7 +2512,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov r5, s2
; CHECK-NEXT: vmov r2, s18
; CHECK-NEXT: vmov r3, s16
-; CHECK-NEXT: vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q4, #0x0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: umull r2, r5, r2, r5
; CHECK-NEXT: umull r3, r4, r3, r4
@@ -2541,7 +2533,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov.u16 r4, q6[5]
; CHECK-NEXT: vmov q0[3], q0[1], r4, r5
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q0, #0xff
; CHECK-NEXT: vpsel q0, q0, q4
; CHECK-NEXT: vmov r5, r4, d0
; CHECK-NEXT: vmov q4[2], q4[0], r5, r4
@@ -2598,7 +2590,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: adcs r3, r4
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -2619,14 +2610,12 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: vmov q3, q0
; CHECK-NEXT: vcmp.i8 eq, q2, zr
; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.i8 q2, #0xff
; CHECK-NEXT: vpsel q5, q2, q0
-; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.i8 q0, #0x0
; CHECK-NEXT: vmov.u8 r2, q5[0]
; CHECK-NEXT: vmov.s8 r4, q1[2]
; CHECK-NEXT: vmov.16 q4[0], r2
@@ -2676,7 +2665,7 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: adc.w r12, r12, r2
; CHECK-NEXT: vmov r2, r3, d15
; CHECK-NEXT: vmov q0[2], q0[0], r2, r3
-; CHECK-NEXT: vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.i8 q7, #0x0
; CHECK-NEXT: vmov q0[3], q0[1], r2, r3
; CHECK-NEXT: vmov.s8 r2, q1[3]
; CHECK-NEXT: vmov.s8 r3, q3[3]
@@ -2701,7 +2690,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov q0[3], q0[1], r4, r5
; CHECK-NEXT: smull r2, r3, r3, r2
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vpsel q6, q2, q7
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q6, q2, q0
; CHECK-NEXT: vmov r5, r4, d12
; CHECK-NEXT: vmov q0[2], q0[0], r5, r4
; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
@@ -2755,17 +2745,18 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: vmov.16 q6[7], r5
; CHECK-NEXT: vmov.s8 r2, q1[8]
; CHECK-NEXT: vcmp.i16 ne, q6, zr
+; CHECK-NEXT: vmov.i8 q6, #0x0
+; CHECK-NEXT: vpsel q5, q2, q6
; CHECK-NEXT: vmov.s8 r3, q3[8]
-; CHECK-NEXT: vpsel q5, q2, q7
-; CHECK-NEXT: smull r2, r3, r3, r2
; CHECK-NEXT: vmov.u16 r5, q5[2]
; CHECK-NEXT: vmov.u16 r4, q5[0]
; CHECK-NEXT: vmov q0[2], q0[0], r4, r5
; CHECK-NEXT: vmov.u16 r5, q5[3]
; CHECK-NEXT: vmov.u16 r4, q5[1]
+; CHECK-NEXT: smull r2, r3, r3, r2
; CHECK-NEXT: vmov q0[3], q0[1], r4, r5
; CHECK-NEXT: vcmp.i32 ne, q0, zr
-; CHECK-NEXT: vpsel q6, q2, q7
+; CHECK-NEXT: vpsel q6, q2, q6
; CHECK-NEXT: vmov r5, r4, d12
; CHECK-NEXT: vmov q0[2], q0[0], r5, r4
; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
@@ -2849,7 +2840,6 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y
; CHECK-NEXT: adcs r3, r4
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adcs r1, r3
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
diff --git a/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll b/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
index 0c349c3..cba394f 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
@@ -59,18 +59,18 @@ define void @foo(i32 %M, i32 %N, i32 %K, ptr %A, ptr %B_rcr4, ptr %C, i32 %c_row
; CHECK-NEXT: [[MOVSX64rm32_:%[0-9]+]]:gr64_nosp = MOVSX64rm32 %fixed-stack.2, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.2, align 8)
; CHECK-NEXT: [[MOVSX64rr32_3:%[0-9]+]]:gr64_nosp = MOVSX64rr32 [[MOV32rm2]].sub_32bit
; CHECK-NEXT: [[MOVSX64rm32_1:%[0-9]+]]:gr64 = MOVSX64rm32 %fixed-stack.1, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.1, align 16)
- ; CHECK-NEXT: MOV64mr %stack.5, 1, $noreg, 0, $noreg, [[MOVSX64rm32_1]] :: (store (s64) into %stack.5)
; CHECK-NEXT: [[MOVSX64rr32_4:%[0-9]+]]:gr64 = MOVSX64rr32 [[MOV32rm1]]
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr32 = COPY [[MOV32rm3]]
- ; CHECK-NEXT: [[MOVSX64rr32_5:%[0-9]+]]:gr64 = MOVSX64rr32 [[COPY6]]
+ ; CHECK-NEXT: [[MOVSX64rr32_5:%[0-9]+]]:gr64 = MOVSX64rr32 [[MOV32rm3]]
; CHECK-NEXT: [[MOVSX64rr32_6:%[0-9]+]]:gr64 = MOVSX64rr32 [[MOV32rm]]
; CHECK-NEXT: MOV64mr %stack.8, 1, $noreg, 0, $noreg, [[MOVSX64rr32_6]] :: (store (s64) into %stack.8)
+ ; CHECK-NEXT: MOV64mr %stack.5, 1, $noreg, 0, $noreg, [[COPY1]] :: (store (s64) into %stack.5)
; CHECK-NEXT: MOV64mr %stack.6, 1, $noreg, 0, $noreg, [[MOVSX64rr32_4]] :: (store (s64) into %stack.6)
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = COPY [[MOVSX64rr32_4]]
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = IMUL64rr [[COPY7]], [[MOVSX64rr32_2]], implicit-def dead $eflags
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = ADD64rr [[COPY7]], [[MOVSX64rm32_]], implicit-def dead $eflags
- ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 4, [[COPY7]], 0, $noreg
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = COPY [[MOVSX64rr32_4]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = IMUL64rr [[COPY6]], [[MOVSX64rr32_2]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = ADD64rr [[COPY6]], [[MOVSX64rm32_]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 4, [[COPY6]], 0, $noreg
; CHECK-NEXT: MOV64mr %stack.9, 1, $noreg, 0, $noreg, [[LEA64r]] :: (store (s64) into %stack.9)
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_with_sub_8bit = COPY [[MOV32rm2]]
; CHECK-NEXT: MOV64mr %stack.7, 1, $noreg, 0, $noreg, [[MOVSX64rr32_5]] :: (store (s64) into %stack.7)
; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_5]]
; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64 = IMUL64rr [[COPY8]], [[MOVSX64rr32_2]], implicit-def dead $eflags
@@ -87,8 +87,11 @@ define void @foo(i32 %M, i32 %N, i32 %K, ptr %A, ptr %B_rcr4, ptr %C, i32 %c_row
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[MOV32rm4:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.2, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.2, align 8)
; CHECK-NEXT: CMP32rm [[MOV32rm4]], %fixed-stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load (s32) from %fixed-stack.1, align 16)
- ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.3, 1, $noreg, 0, $noreg :: (load (s64) from %stack.3)
- ; CHECK-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm %stack.5, 1, $noreg, 0, $noreg :: (load (s64) from %stack.5)
+ ; CHECK-NEXT: [[MOV32rm5:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64_nosp = MOV64rm %stack.3, 1, $noreg, 0, $noreg :: (load (s64) from %stack.3)
+ ; CHECK-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm %stack.11, 1, $noreg, 0, $noreg :: (load (s64) from %stack.11)
+ ; CHECK-NEXT: [[MOV32rm6:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.3, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.3, align 16)
+ ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr32 = COPY [[MOV32rm6]]
; CHECK-NEXT: JCC_1 %bb.5, 13, implicit $eflags
; CHECK-NEXT: JMP_1 %bb.3
; CHECK-NEXT: {{ $}}
@@ -98,9 +101,8 @@ define void @foo(i32 %M, i32 %N, i32 %K, ptr %A, ptr %B_rcr4, ptr %C, i32 %c_row
; CHECK-NEXT: [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm %stack.6, 1, $noreg, 0, $noreg :: (load (s64) from %stack.6)
; CHECK-NEXT: [[MOV64rm2:%[0-9]+]]:gr64 = nsw IMUL64rr [[MOV64rm2]], [[MOVSX64rr32_]], implicit-def dead $eflags
; CHECK-NEXT: [[MOV64rm2:%[0-9]+]]:gr64 = ADD64rm [[MOV64rm2]], %stack.1, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load (s64) from %stack.1)
- ; CHECK-NEXT: MOV64mr %stack.13, 1, $noreg, 0, $noreg, [[MOV64rm2]] :: (store (s64) into %stack.13)
- ; CHECK-NEXT: [[MOV32rm5:%[0-9]+]]:gr32 = MOV32rm %stack.12, 1, $noreg, 0, $noreg :: (load (s32) from %stack.12)
- ; CHECK-NEXT: undef [[COPY9:%[0-9]+]].sub_32bit:gr64_nosp = COPY [[MOV32rm5]]
+ ; CHECK-NEXT: [[MOV32rm7:%[0-9]+]]:gr32 = MOV32rm %stack.12, 1, $noreg, 0, $noreg :: (load (s32) from %stack.12)
+ ; CHECK-NEXT: undef [[COPY10:%[0-9]+]].sub_32bit:gr64_nosp = COPY [[MOV32rm7]]
; CHECK-NEXT: [[MOV64rm3:%[0-9]+]]:gr64 = MOV64rm %stack.9, 1, $noreg, 0, $noreg :: (load (s64) from %stack.9)
; CHECK-NEXT: [[MOV64rm4:%[0-9]+]]:gr64 = MOV64rm %stack.4, 1, $noreg, 0, $noreg :: (load (s64) from %stack.4)
; CHECK-NEXT: JMP_1 %bb.6
@@ -123,40 +125,30 @@ define void @foo(i32 %M, i32 %N, i32 %K, ptr %A, ptr %B_rcr4, ptr %C, i32 %c_row
; CHECK-NEXT: bb.6.for.body17:
; CHECK-NEXT: successors: %bb.6(0x7c000000), %bb.5(0x04000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[COPY6]].sub_16bit, [[MOV32rm2]].sub_16bit
- ; CHECK-NEXT: [[MOV64rm7:%[0-9]+]]:gr64 = MOV64rm %stack.13, 1, $noreg, 0, $noreg :: (load (s64) from %stack.13)
- ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[COPY6]].sub_16bit, [[COPY4]].sub_16bit, [[MOV64rm7]], 1, [[MOVSX64rr32_]], 0, $noreg
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr64_nosp = MOVSX64rr32 [[COPY9]].sub_32bit
- ; CHECK-NEXT: [[COPY10:%[0-9]+]]:gr32 = COPY [[LEA64_32r1]]
- ; CHECK-NEXT: [[COPY11:%[0-9]+]]:gr64 = COPY [[MOV64rm1]]
- ; CHECK-NEXT: [[COPY12:%[0-9]+]]:gr32 = COPY [[COPY4]]
- ; CHECK-NEXT: [[COPY13:%[0-9]+]]:gr32 = COPY [[COPY6]]
- ; CHECK-NEXT: [[COPY14:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_3]]
- ; CHECK-NEXT: [[COPY15:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_2]]
- ; CHECK-NEXT: [[COPY16:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_]]
- ; CHECK-NEXT: [[COPY17:%[0-9]+]]:gr64 = COPY [[MOV32rm2]]
- ; CHECK-NEXT: [[COPY18:%[0-9]+]]:gr64 = COPY [[COPY1]]
- ; CHECK-NEXT: [[LEA64r2:%[0-9]+]]:gr64 = LEA64r [[COPY18]], 1, [[COPY9]], 0, $noreg
- ; CHECK-NEXT: [[COPY19:%[0-9]+]]:gr64_nosp = COPY [[MOV64rm]]
- ; CHECK-NEXT: [[MOV32rm6:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load (s32) from %stack.2)
- ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[MOV32rm6]].sub_16bit, [[COPY10]].sub_16bit, [[LEA64r2]], 1, [[COPY19]], 0, $noreg
- ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = COPY [[COPY19]]
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY [[COPY18]]
- ; CHECK-NEXT: [[MOV32rm2:%[0-9]+]]:gr64_nosp = COPY [[COPY17]]
- ; CHECK-NEXT: [[MOVSX64rr32_:%[0-9]+]]:gr64_nosp = COPY [[COPY16]]
- ; CHECK-NEXT: [[MOVSX64rr32_2:%[0-9]+]]:gr64_nosp = COPY [[COPY15]]
- ; CHECK-NEXT: [[MOVSX64rr32_3:%[0-9]+]]:gr64_nosp = COPY [[COPY14]]
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr32 = COPY [[COPY13]]
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr32 = COPY [[COPY12]]
- ; CHECK-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = COPY [[COPY11]]
- ; CHECK-NEXT: [[MOV64rm8:%[0-9]+]]:gr64 = MOV64rm %stack.11, 1, $noreg, 0, $noreg :: (load (s64) from %stack.11)
- ; CHECK-NEXT: [[LEA64_32r1:%[0-9]+]]:gr32 = COPY [[COPY10]]
- ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTDPBSSDV [[COPY6]].sub_16bit, [[LEA64_32r1]].sub_16bit, [[COPY4]].sub_16bit, [[PTILEZEROV]], [[PTILELOADDV]], [[PTILELOADDV1]]
- ; CHECK-NEXT: PTILESTOREDV [[COPY6]].sub_16bit, [[MOV32rm2]].sub_16bit, [[MOV64rm3]], 1, [[MOVSX64rr32_2]], 0, $noreg, [[PTILEZEROV]]
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[COPY9]].sub_16bit, [[COPY7]].sub_16bit
+ ; CHECK-NEXT: [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[COPY9]].sub_16bit, [[COPY4]].sub_16bit, [[MOV64rm2]], 1, [[MOVSX64rr32_]], 0, $noreg
+ ; CHECK-NEXT: [[COPY10:%[0-9]+]]:gr64_nosp = MOVSX64rr32 [[COPY10]].sub_32bit
+ ; CHECK-NEXT: [[COPY11:%[0-9]+]]:gr64 = COPY [[MOVSX64rm32_1]]
+ ; CHECK-NEXT: [[COPY12:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_3]]
+ ; CHECK-NEXT: [[COPY13:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_2]]
+ ; CHECK-NEXT: [[COPY14:%[0-9]+]]:gr64 = COPY [[MOVSX64rr32_]]
+ ; CHECK-NEXT: [[COPY15:%[0-9]+]]:gr64 = COPY [[COPY7]]
+ ; CHECK-NEXT: [[MOV64rm7:%[0-9]+]]:gr64 = MOV64rm %stack.5, 1, $noreg, 0, $noreg :: (load (s64) from %stack.5)
+ ; CHECK-NEXT: [[LEA64r2:%[0-9]+]]:gr64 = LEA64r [[MOV64rm7]], 1, [[COPY10]], 0, $noreg
+ ; CHECK-NEXT: [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[MOV32rm5]].sub_16bit, [[LEA64_32r1]].sub_16bit, [[LEA64r2]], 1, [[MOV64rm]], 0, $noreg
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_with_sub_8bit = COPY [[COPY15]]
+ ; CHECK-NEXT: [[MOVSX64rr32_:%[0-9]+]]:gr64_nosp = COPY [[COPY14]]
+ ; CHECK-NEXT: [[MOVSX64rr32_2:%[0-9]+]]:gr64_nosp = COPY [[COPY13]]
+ ; CHECK-NEXT: [[MOVSX64rr32_3:%[0-9]+]]:gr64_nosp = COPY [[COPY12]]
+ ; CHECK-NEXT: [[MOVSX64rm32_1:%[0-9]+]]:gr64 = COPY [[COPY11]]
+ ; CHECK-NEXT: [[MOV32rm8:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.3, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.3, align 16)
+ ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr32 = COPY [[MOV32rm8]]
+ ; CHECK-NEXT: [[PTILEZEROV:%[0-9]+]]:tile = PTDPBSSDV [[COPY9]].sub_16bit, [[LEA64_32r1]].sub_16bit, [[COPY4]].sub_16bit, [[PTILEZEROV]], [[PTILELOADDV]], [[PTILELOADDV1]]
+ ; CHECK-NEXT: PTILESTOREDV [[COPY9]].sub_16bit, [[COPY7]].sub_16bit, [[MOV64rm3]], 1, [[MOVSX64rr32_2]], 0, $noreg, [[PTILEZEROV]]
; CHECK-NEXT: [[MOV64rm4:%[0-9]+]]:gr64 = ADD64rr [[MOV64rm4]], [[MOVSX64rr32_3]], implicit-def dead $eflags
- ; CHECK-NEXT: [[MOV64rm3:%[0-9]+]]:gr64 = ADD64rr [[MOV64rm3]], [[MOV64rm8]], implicit-def dead $eflags
- ; CHECK-NEXT: [[COPY9:%[0-9]+]].sub_32bit:gr64_nosp = ADD32rr [[COPY9]].sub_32bit, [[LEA64_32r1]], implicit-def dead $eflags
- ; CHECK-NEXT: CMP64rr [[MOV64rm4]], [[MOV64rm1]], implicit-def $eflags
+ ; CHECK-NEXT: [[MOV64rm3:%[0-9]+]]:gr64 = ADD64rr [[MOV64rm3]], [[MOV64rm1]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[COPY10:%[0-9]+]].sub_32bit:gr64_nosp = ADD32rr [[COPY10]].sub_32bit, [[LEA64_32r1]], implicit-def dead $eflags
+ ; CHECK-NEXT: CMP64rr [[MOV64rm4]], [[MOVSX64rm32_1]], implicit-def $eflags
; CHECK-NEXT: JCC_1 %bb.6, 12, implicit $eflags
; CHECK-NEXT: JMP_1 %bb.5
entry:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
index 31de686..92e4588 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
@@ -148,21 +148,21 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: G_BRCOND [[TRUNC]](s1), %bb.2
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; CHECK-NEXT: G_BRCOND [[TRUNC1]](s1), %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.cond.false:
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.cond.end:
- ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC2]](s8), %bb.1, [[TRUNC1]](s8), %bb.0
- ; CHECK-NEXT: $al = COPY [[PHI]](s8)
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC2]](s1), %bb.1, [[TRUNC]](s1), %bb.0
+ ; CHECK-NEXT: [[EXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PHI]](s1)
+ ; CHECK-NEXT: $al = COPY [[EXT]](s8)
; CHECK-NEXT: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir
new file mode 100644
index 0000000..b02832b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir
@@ -0,0 +1,32 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=sse2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,SSE2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx512f -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX512F
+
+
+---
+name: test_basic_g_implicit_def_v8i64
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_basic_g_implicit_def_v8i64
+ ; AVX512F: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF
+ ; AVX2: [[DEF_AVX2:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; AVX2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_AVX2]](<4 x s64>), [[DEF_AVX2]](<4 x s64>)
+ ; SSE2: [[DEF_SSE2:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+ ; SSE2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>)
+ %0:_(<8 x s64>) = G_IMPLICIT_DEF
+ RET 0, implicit %0
+...
+
+---
+name: test_g_implicit_def_clamp_size
+body: |
+ bb.1:
+  ; CHECK-LABEL: name: test_g_implicit_def_clamp_size
+  ; AVX512F: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF
+ ; AVX2: {{%[0-9]+}}:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; SSE2: {{%[0-9]+}}:_(<2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<5 x s63>) = G_IMPLICIT_DEF
+ RET 0, implicit %0
+...
+
+
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir
new file mode 100644
index 0000000..254c1b6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_cfb_vec256
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $ymm0
+
+ ; CHECK-LABEL: name: select_cfb_vec256
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $ymm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $ymm1
+ %0:vecr(<8 x s32>) = COPY $ymm0
+ %1:vecr(<8 x s32>) = G_CONSTANT_FOLD_BARRIER %0
+ $ymm1 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir
new file mode 100644
index 0000000..3da354b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_cfb_vec512
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $zmm0
+
+ ; CHECK-LABEL: name: select_cfb_vec512
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $zmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $zmm1
+ %0:vecr(<8 x s64>) = COPY $zmm0
+ %1:vecr(<8 x s64>) = G_CONSTANT_FOLD_BARRIER %0
+ $zmm1 = COPY %1(<8 x s64>)
+ RET 0, implicit $zmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir
new file mode 100644
index 0000000..fa012f9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir
@@ -0,0 +1,77 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+
+---
+name: select_cfb_scalar_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $edi
+
+ ; CHECK-LABEL: name: select_cfb_scalar_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $eax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $eax
+ %0:gpr(s32) = COPY $edi
+ %1:gpr(s32) = G_CONSTANT_FOLD_BARRIER %0
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
+...
+
+---
+name: select_cfb_scalar_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi
+
+ ; CHECK-LABEL: name: select_cfb_scalar_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $rax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $rax
+ %0:gpr(s64) = COPY $rdi
+ %1:gpr(s64) = G_CONSTANT_FOLD_BARRIER %0
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
+...
+
+
+---
+name: select_cfb_vec128
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $xmm0
+
+ ; CHECK-LABEL: name: select_cfb_vec128
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $xmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $xmm1
+ %0:vecr(<4 x s32>) = COPY $xmm0
+ %1:vecr(<4 x s32>) = G_CONSTANT_FOLD_BARRIER %0
+ $xmm1 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir
new file mode 100644
index 0000000..11251e4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_freeze_vec256
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $ymm0
+
+ ; CHECK-LABEL: name: select_freeze_vec256
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $ymm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $ymm1
+ %0:vecr(<8 x s32>) = COPY $ymm0
+ %1:vecr(<8 x s32>) = G_FREEZE %0
+ $ymm1 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir
new file mode 100644
index 0000000..bcf299a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_freeze_vec512
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $zmm0
+
+ ; CHECK-LABEL: name: select_freeze_vec512
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $zmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $zmm1
+ %0:vecr(<8 x s64>) = COPY $zmm0
+ %1:vecr(<8 x s64>) = G_FREEZE %0
+ $zmm1 = COPY %1(<8 x s64>)
+ RET 0, implicit $zmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir
new file mode 100644
index 0000000..cf5ad47
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir
@@ -0,0 +1,77 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+
+---
+name: select_freeze_scalar_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $edi
+
+ ; CHECK-LABEL: name: select_freeze_scalar_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $eax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $eax
+ %0:gpr(s32) = COPY $edi
+ %1:gpr(s32) = G_FREEZE %0
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
+...
+
+---
+name: select_freeze_scalar_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi
+
+ ; CHECK-LABEL: name: select_freeze_scalar_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $rax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $rax
+ %0:gpr(s64) = COPY $rdi
+ %1:gpr(s64) = G_FREEZE %0
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
+...
+
+
+---
+name: select_freeze_vec128
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $xmm0
+
+ ; CHECK-LABEL: name: select_freeze_vec128
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $xmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $xmm1
+ %0:vecr(<4 x s32>) = COPY $xmm0
+ %1:vecr(<4 x s32>) = G_FREEZE %0
+ $xmm1 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm1
+...
diff --git a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
index bf6b096..b428ce4 100644
--- a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
+++ b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
@@ -136,8 +136,6 @@ define void @_Z2x6v() local_unnamed_addr {
; CHECK-NEXT: movl (%r8), %edx
; CHECK-NEXT: leal 8(,%rbx,8), %eax
; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: leaq 8(%rsi), %rax
-; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: leaq 32(%rsi), %r11
; CHECK-NEXT: leaq 8(,%rbx,8), %rbx
; CHECK-NEXT: xorl %r14d, %r14d
@@ -189,7 +187,8 @@ define void @_Z2x6v() local_unnamed_addr {
; CHECK-NEXT: jae .LBB1_7
; CHECK-NEXT: # %bb.6: # %vector.memcheck
; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; CHECK-NEXT: leaq 8(%rsi), %r9
+; CHECK-NEXT: addq %r9, %rax
; CHECK-NEXT: leaq (%rax,%r10,8), %rax
; CHECK-NEXT: cmpq %r15, %rax
; CHECK-NEXT: ja .LBB1_14
diff --git a/llvm/test/CodeGen/X86/delete-dead-instrs-with-live-uses.mir b/llvm/test/CodeGen/X86/delete-dead-instrs-with-live-uses.mir
index 10ee445..d355374 100644
--- a/llvm/test/CodeGen/X86/delete-dead-instrs-with-live-uses.mir
+++ b/llvm/test/CodeGen/X86/delete-dead-instrs-with-live-uses.mir
@@ -7,8 +7,8 @@
# CHECK: jne
# CHECK: andl $-16, %edx
# CHECK: xorl %ebx, %ebx
-# CHECK: movl -16(%ebp), %esi
-# CHECK: xorl %eax, %eax
+# CHECK: xorl %esi, %esi
+# CHECK: movl %eax, %ecx
name: test
tracksRegLiveness: true
diff --git a/llvm/test/CodeGen/X86/inalloca-invoke.ll b/llvm/test/CodeGen/X86/inalloca-invoke.ll
index c2728f7..68cb24d 100644
--- a/llvm/test/CodeGen/X86/inalloca-invoke.ll
+++ b/llvm/test/CodeGen/X86/inalloca-invoke.ll
@@ -23,7 +23,6 @@ blah:
; CHECK: pushl %eax
; CHECK: subl $20, %esp
; CHECK: movl %esp, %[[beg:[^ ]*]]
-; CHECK: leal 12(%[[beg]]), %[[end:[^ ]*]]
call void @begin(ptr sret(%Iter) %temp.lvalue)
; CHECK: calll _begin
@@ -32,6 +31,7 @@ blah:
to label %invoke.cont unwind label %lpad
; Uses end as sret param.
+; CHECK: leal 12(%[[beg]]), %[[end:[^ ]*]]
; CHECK: pushl %[[end]]
; CHECK: calll _plus
diff --git a/llvm/test/CodeGen/X86/licm-regpressure.ll b/llvm/test/CodeGen/X86/licm-regpressure.ll
index 72a4832..26ed2a3 100644
--- a/llvm/test/CodeGen/X86/licm-regpressure.ll
+++ b/llvm/test/CodeGen/X86/licm-regpressure.ll
@@ -1,14 +1,64 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
-; This tests currently fails as MachineLICM does not compute register pressure
-; correctly. More details: llvm.org/PR23143
-; XFAIL: *
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-linux -stop-after=early-machinelicm -o - | FileCheck %s
-; MachineLICM should take register pressure into account.
-; CHECK-NOT: Spill
+; FIXME: MachineLICM does not compute register pressure correctly and we end up
+; emitting too many ADD64ri32s. More details: llvm.org/PR23143
%struct.A = type { i32, i32, i32, i32, i32, i32, i32 }
define void @test(i1 %b, ptr %a) nounwind {
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $edi, $rsi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; CHECK-NEXT: [[ADD64ri32_:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 4, implicit-def dead $eflags
+ ; CHECK-NEXT: [[ADD64ri32_1:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 8, implicit-def dead $eflags
+ ; CHECK-NEXT: [[ADD64ri32_2:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 12, implicit-def dead $eflags
+ ; CHECK-NEXT: [[ADD64ri32_3:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 16, implicit-def dead $eflags
+ ; CHECK-NEXT: [[ADD64ri32_4:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 20, implicit-def dead $eflags
+ ; CHECK-NEXT: [[ADD64ri32_5:%[0-9]+]]:gr64 = nuw ADD64ri32 [[COPY]], 24, implicit-def dead $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop-body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[COPY]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_1]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_2]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_3]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_4]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $rdi = COPY [[ADD64ri32_5]]
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @assign, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: TEST8ri [[COPY2]], 1, implicit-def $eflags
+ ; CHECK-NEXT: JCC_1 %bb.1, 5, implicit $eflags
+ ; CHECK-NEXT: JMP_1 %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop-exit:
+ ; CHECK-NEXT: RET 0
entry:
br label %loop-header
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
index 3349d31..b2064b1 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -317,13 +317,13 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB4_1: ## %throw
-; CHECK-NEXT: Ltmp0:
+; CHECK-NEXT: Ltmp0: ## EH_LABEL
; CHECK-NEXT: callq _throw_exception
-; CHECK-NEXT: Ltmp1:
+; CHECK-NEXT: Ltmp1: ## EH_LABEL
; CHECK-NEXT: ## %bb.2: ## %unreachable
; CHECK-NEXT: ud2
; CHECK-NEXT: LBB4_3: ## %landing
-; CHECK-NEXT: Ltmp2:
+; CHECK-NEXT: Ltmp2: ## EH_LABEL
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
; CHECK-NEXT: Lfunc_end0:
@@ -340,12 +340,12 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality {
; NOCOMPACTUNWIND-NEXT: retq
; NOCOMPACTUNWIND-NEXT: .LBB4_1: # %throw
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp0:
+; NOCOMPACTUNWIND-NEXT: .Ltmp0: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT
-; NOCOMPACTUNWIND-NEXT: .Ltmp1:
+; NOCOMPACTUNWIND-NEXT: .Ltmp1: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: # %bb.2: # %unreachable
; NOCOMPACTUNWIND-NEXT: .LBB4_3: # %landing
-; NOCOMPACTUNWIND-NEXT: .Ltmp2:
+; NOCOMPACTUNWIND-NEXT: .Ltmp2: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: popq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
@@ -379,9 +379,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; CHECK-NEXT: ## %bb.1: ## %throw
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: Ltmp3:
+; CHECK-NEXT: Ltmp3: ## EH_LABEL
; CHECK-NEXT: callq _throw_exception
-; CHECK-NEXT: Ltmp4:
+; CHECK-NEXT: Ltmp4: ## EH_LABEL
; CHECK-NEXT: LBB5_3: ## %fallthrough
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: nop
@@ -390,7 +390,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; CHECK-NEXT: LBB5_4: ## %return
; CHECK-NEXT: retq
; CHECK-NEXT: LBB5_2: ## %landing
-; CHECK-NEXT: Ltmp5:
+; CHECK-NEXT: Ltmp5: ## EH_LABEL
; CHECK-NEXT: jmp LBB5_3
; CHECK-NEXT: Lfunc_end1:
;
@@ -401,9 +401,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; NOCOMPACTUNWIND-NEXT: # %bb.1: # %throw
; NOCOMPACTUNWIND-NEXT: pushq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp3:
+; NOCOMPACTUNWIND-NEXT: .Ltmp3: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT
-; NOCOMPACTUNWIND-NEXT: .Ltmp4:
+; NOCOMPACTUNWIND-NEXT: .Ltmp4: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: .LBB5_3: # %fallthrough
; NOCOMPACTUNWIND-NEXT: #APP
; NOCOMPACTUNWIND-NEXT: nop
@@ -414,7 +414,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; NOCOMPACTUNWIND-NEXT: retq
; NOCOMPACTUNWIND-NEXT: .LBB5_2: # %landing
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp5:
+; NOCOMPACTUNWIND-NEXT: .Ltmp5: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: jmp .LBB5_3
entry:
br i1 %cond, label %throw, label %return
diff --git a/llvm/test/MC/AMDGPU/vop3-gfx9.s b/llvm/test/MC/AMDGPU/vop3-gfx9.s
index f98f33a..50a7433 100644
--- a/llvm/test/MC/AMDGPU/vop3-gfx9.s
+++ b/llvm/test/MC/AMDGPU/vop3-gfx9.s
@@ -566,6 +566,141 @@ v_interp_p2_f16 v5, v2, attr0.x, v3 clamp
// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
// VI: v_interp_p2_f16 v5, v2, attr0.x, v3 clamp ; encoding: [0x05,0x80,0x76,0xd2,0x00,0x04,0x0e,0x04]
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 ; encoding: [0x05,0x00,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
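+// A note on the op_sel encodings below (inferred from the emitted bytes rather
+// than stated anywhere in this file): in VOP3 byte 1, op_sel[0] sets 0x08,
+// op_sel[2] sets 0x20, op_sel[3] sets 0x40, and clamp sets 0x80. op_sel[1]
+// would select into the attribute operand and is not encoded, which is why
+// [0,1,x,x] round-trips as [0,0,x,x].
+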
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 ; encoding: [0x05,0x00,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0] ; encoding: [0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0] ; encoding: [0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 ; encoding: [0x05,0x00,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0,1] ; encoding: [0x05,0x40,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,1] ; encoding: [0x05,0x60,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 ; encoding: [0x05,0x00,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0,1] ; encoding: [0x05,0x40,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,1,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,1] ; encoding: [0x05,0x60,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,1] ; encoding: [0x05,0x48,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0] ; encoding: [0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,1] ; encoding: [0x05,0x68,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,0,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,0,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,1] ; encoding: [0x05,0x48,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,1,0]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0] ; encoding: [0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
+v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,1,1,1]
+// GFX9: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,1] ; encoding: [0x05,0x68,0x77,0xd2,0x00,0x04,0x0e,0x04]
+// NOSICI: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// NOVI: :[[@LINE-3]]:{{[0-9]+}}: error: not a valid operand.
+
v_interp_p2_legacy_f16 v5, v2, attr31.x, v3
// GFX9: v_interp_p2_legacy_f16 v5, v2, attr31.x, v3 ; encoding: [0x05,0x00,0x76,0xd2,0x1f,0x04,0x0e,0x04]
// NOGCN: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3.txt
index 802d6368..60f058d 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3.txt
@@ -19311,6 +19311,27 @@
# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 clamp ; encoding: [0x05,0x80,0x77,0xd2,0x00,0x04,0x0e,0x04]
0x05,0x80,0x77,0xd2,0x00,0x04,0x0e,0x04
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,0,1] ; encoding: [0x05,0x40,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x40,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x20,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[0,0,1,1] ; encoding: [0x05,0x60,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x60,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x08,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,0,1] ; encoding: [0x05,0x48,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x48,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,0] ; encoding: [0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x28,0x77,0xd2,0x00,0x04,0x0e,0x04
+
+# CHECK: v_interp_p2_f16 v5, v2, attr0.x, v3 op_sel:[1,0,1,1] ; encoding: [0x05,0x68,0x77,0xd2,0x00,0x04,0x0e,0x04]
+0x05,0x68,0x77,0xd2,0x00,0x04,0x0e,0x04
+
# CHECK: v_add_f64 v[5:6], v[1:2], v[2:3] ; encoding: [0x05,0x00,0x80,0xd2,0x01,0x05,0x02,0x00]
0x05,0x00,0x80,0xd2,0x01,0x05,0x02,0x00
diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index 3aea0f2..f595dfe 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -67,6 +67,7 @@
; CHECK-O1-NEXT: Running analysis: TargetLibraryAnalysis
; CHECK-O-NEXT: Running pass: GlobalSplitPass
; CHECK-O-NEXT: Running pass: WholeProgramDevirtPass
+; CHECK-O-NEXT: Running pass: NoRecurseLTOInferencePass
; CHECK-O23SZ-NEXT: Running pass: CoroEarlyPass
; CHECK-O1-NEXT: Running pass: LowerTypeTestsPass
; CHECK-O23SZ-NEXT: Running pass: GlobalOptPass
diff --git a/llvm/test/ThinLTO/X86/memprof-basic.ll b/llvm/test/ThinLTO/X86/memprof-basic.ll
index 0ff0ce0..537e1b8 100644
--- a/llvm/test/ThinLTO/X86/memprof-basic.ll
+++ b/llvm/test/ThinLTO/X86/memprof-basic.ll
@@ -103,7 +103,9 @@ declare i32 @sleep()
define internal ptr @_Z3barv() #0 !dbg !15 {
entry:
- %call = call ptr @_Znam(i64 0), !memprof !2, !callsite !7
+ ;; Use an ambiguous attribute for this allocation, which is now added to such
+ ;; allocations during matching. It should not affect cloning.
+ %call = call ptr @_Znam(i64 0) #1, !memprof !2, !callsite !7
ret ptr null
}
@@ -125,6 +127,7 @@ entry:
uselistorder ptr @_Z3foov, { 1, 0 }
attributes #0 = { noinline optnone }
+attributes #1 = { "memprof"="ambiguous" }
!llvm.dbg.cu = !{!13}
!llvm.module.flags = !{!20, !21}
diff --git a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
index d0e7c1c2..e1e1611 100644
--- a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
+++ b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
@@ -80,8 +80,8 @@ cleanup2:
; CHECK: cleanup2.corodispatch:
; CHECK: %1 = phi i8 [ 0, %handler2 ], [ 1, %catch.dispatch.2 ]
; CHECK: %2 = cleanuppad within %h1 []
-; CHECK: %switch = icmp ult i8 %1, 1
-; CHECK: br i1 %switch, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2
+; CHECK: %3 = icmp eq i8 %1, 0
+; CHECK: br i1 %3, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2
; CHECK: cleanup2.from.handler2:
; CHECK: %valueB.reload = load i32, ptr %valueB.spill.addr, align 4
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_address_taken.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_address_taken.ll
new file mode 100644
index 0000000..bcdf75b
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_address_taken.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call to a library function which is not marked as
+; NoCallback. Function bob() does not have internal linkage and hence prevents
+; norecurse from being added.
+
+@.str = private unnamed_addr constant [12 x i8] c"Hello World\00", align 1
+
+;.
+; CHECK: @.str = private unnamed_addr constant [12 x i8] c"Hello World\00", align 1
+;.
+define dso_local void @bob() {
+; CHECK-LABEL: define dso_local void @bob() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+; CHECK-NEXT: ret void
+;
+entry:
+ %call = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+ ret void
+}
+
+declare i32 @printf(ptr readonly captures(none), ...)
+
+define dso_local i32 @main() norecurse {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define dso_local i32 @main(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bob()
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @bob()
+ ret i32 0
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_no_address_taken.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_no_address_taken.ll
new file mode 100644
index 0000000..a03b4ca
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_libfunc_no_address_taken.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call to a library function which is not marked as
+; NoCallback. All functions except main() are internal and main is marked
+; norecurse, so nothing blocks norecurse from being added to bob().
+
+@.str = private unnamed_addr constant [12 x i8] c"Hello World\00", align 1
+
+; Function Attrs: nofree noinline nounwind uwtable
+;.
+; CHECK: @.str = private unnamed_addr constant [12 x i8] c"Hello World\00", align 1
+;.
+define internal void @bob() {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define internal void @bob(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+; CHECK-NEXT: ret void
+;
+entry:
+ %call = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+ ret void
+}
+
+; Function Attrs: nofree nounwind
+declare i32 @printf(ptr readonly captures(none), ...)
+
+; Function Attrs: nofree norecurse nounwind uwtable
+define dso_local i32 @main() norecurse {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define dso_local i32 @main(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bob()
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @bob()
+ ret i32 0
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_lto.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_lto.ll
new file mode 100644
index 0000000..5be707b
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_lto.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call graph which has a recursive function (foo2) which
+; calls a non-recursive internal function (foo3) satisfying the norecurse
+; attribute criteria.
+
+
+define internal void @foo3() {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define internal void @foo3(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define internal i32 @foo2(i32 %accum, i32 %n) {
+; CHECK-LABEL: define internal i32 @foo2(
+; CHECK-SAME: i32 [[ACCUM:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N]], 0
+; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[RECURSE:.*]]
+; CHECK: [[RECURSE]]:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[N]], 1
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[ACCUM]], [[SUB]]
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @foo2(i32 [[MUL]], i32 [[SUB]])
+; CHECK-NEXT: call void @foo3()
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ACCUM]], %[[ENTRY]] ], [ [[CALL]], %[[RECURSE]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cmp = icmp eq i32 %n, 0
+ br i1 %cmp, label %exit, label %recurse
+
+recurse:
+ %sub = sub i32 %n, 1
+ %mul = mul i32 %accum, %sub
+ %call = call i32 @foo2(i32 %mul, i32 %sub)
+ call void @foo3()
+ br label %exit
+
+exit:
+ %res = phi i32 [ %accum, %entry ], [ %call, %recurse ]
+ ret i32 %res
+}
+
+define internal i32 @foo1() {
+; CHECK-LABEL: define internal i32 @foo1() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @foo2(i32 1, i32 5)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @foo2(i32 1, i32 5)
+ ret i32 %res
+}
+
+define dso_local i32 @main() {
+; CHECK-LABEL: define dso_local i32 @main() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @foo1()
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @foo1()
+ ret i32 %res
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion.ll
new file mode 100644
index 0000000..e351f60
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion.ll
@@ -0,0 +1,141 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call graph with multiple SCCs. The purpose of this is
+; to check that norecurse is not added when a function is part of a
+; non-singleton SCC.
+; There are three different SCCs in this test:
+; SCC#1: f1, foo, bar, foo1, bar1
+; SCC#2: bar2, bar3, bar4
+; SCC#3: baz, fun
+; None of these functions should be marked as norecurse
+
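+; (Each SCC above contains more than one function, so every member can reach
+; itself again through the cycle; the pass therefore has to leave norecurse
+; off all of them.)
+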
+define internal void @bar1() {
+; CHECK-LABEL: define internal void @bar1() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @f1()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @f1()
+ ret void
+}
+
+define internal void @f1() {
+; CHECK-LABEL: define internal void @f1() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @foo()
+; CHECK-NEXT: tail call void @bar2()
+; CHECK-NEXT: tail call void @baz()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @foo()
+ tail call void @bar2()
+ tail call void @baz()
+ ret void
+}
+
+define dso_local i32 @main() norecurse {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define dso_local i32 @main(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @f1()
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @f1()
+ ret i32 0
+}
+
+define internal void @foo1() {
+; CHECK-LABEL: define internal void @foo1() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar1()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar1()
+ ret void
+}
+
+define internal void @bar() {
+; CHECK-LABEL: define internal void @bar() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @foo1()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @foo1()
+ ret void
+}
+
+define internal void @foo() {
+; CHECK-LABEL: define internal void @foo() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar()
+ ret void
+}
+
+define internal void @bar4() {
+; CHECK-LABEL: define internal void @bar4() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar2()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar2()
+ ret void
+}
+
+define internal void @bar2() {
+; CHECK-LABEL: define internal void @bar2() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar3()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar3()
+ ret void
+}
+
+define internal void @bar3() {
+; CHECK-LABEL: define internal void @bar3() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar4()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar4()
+ ret void
+}
+
+define internal void @fun() {
+; CHECK-LABEL: define internal void @fun() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @baz()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @baz()
+ ret void
+}
+
+define internal void @baz() {
+; CHECK-LABEL: define internal void @baz() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @fun()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @fun()
+ ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion1.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion1.ll
new file mode 100644
index 0000000..cd94037
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_multi_scc_indirect_recursion1.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call graph with multiple SCCs. The purpose of this is
+; to check that norecurse is added to a function which calls functions that
+; are indirectly recursive but is itself not part of any recursive chain.
+; There are two SCCs in this test:
+; SCC#1: bar2, bar3, bar4
+; SCC#2: baz, fun
+; f1() calls bar2 and baz, both of which are part of indirect recursive
+; chains, but neither calls back into f1(), so f1() can be marked as
+; norecurse.
+
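+; (Put differently: bar2/bar3/bar4 and baz/fun only recurse within their own
+; SCCs, and neither SCC contains f1, so the recursion can never re-enter f1.)
+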
+define dso_local i32 @main() norecurse {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define dso_local i32 @main(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @f1()
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @f1()
+ ret i32 0
+}
+
+define internal void @f1() {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define internal void @f1(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar2()
+; CHECK-NEXT: tail call void @baz()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar2()
+ tail call void @baz()
+ ret void
+}
+
+define internal void @bar4() {
+; CHECK-LABEL: define internal void @bar4() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar2()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar2()
+ ret void
+}
+
+define internal void @bar2() {
+; CHECK-LABEL: define internal void @bar2() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar3()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar3()
+ ret void
+}
+
+define internal void @bar3() {
+; CHECK-LABEL: define internal void @bar3() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bar4()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @bar4()
+ ret void
+}
+
+define internal void @fun() {
+; CHECK-LABEL: define internal void @fun() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @baz()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @baz()
+ ret void
+}
+
+define internal void @baz() {
+; CHECK-LABEL: define internal void @baz() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @fun()
+; CHECK-NEXT: ret void
+;
+entry:
+ tail call void @fun()
+ ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_multinode_refscc.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_multinode_refscc.ll
new file mode 100644
index 0000000..8b81a90
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_multinode_refscc.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt -passes=norecurse-lto-inference -S %s | FileCheck %s
+
+; This is a negative test which results in a RefSCC with size > 1.
+; RefSCC : [(f2), (f1)]
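+; f2 reaches f1 only through a stored function pointer, so the call graph sees
+; a reference edge rather than a call edge: f1 and f2 stay singleton SCCs but
+; collapse into one RefSCC of size 2, on which the pass conservatively bails.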
+; --- SCC A (f1) --- size() = 1
+define internal void @f1() {
+; CHECK-LABEL: define internal void @f1() {
+; CHECK-NEXT: call void @f2()
+; CHECK-NEXT: ret void
+;
+ call void @f2()
+ ret void
+}
+
+; --- SCC B (f2) --- size() = 1
+; f2 indirectly calls f1 using locally allocated function pointer
+define internal void @f2() {
+; CHECK-LABEL: define internal void @f2() {
+; CHECK-NEXT: [[FP:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr @f1, ptr [[FP]], align 8
+; CHECK-NEXT: [[TMP:%.*]] = load ptr, ptr [[FP]], align 8
+; CHECK-NEXT: call void [[TMP]]()
+; CHECK-NEXT: ret void
+;
+ %fp = alloca void ()*
+ store void ()* @f1, void ()** %fp
+ %tmp = load void ()*, void ()** %fp
+ call void %tmp()
+ ret void
+}
+
+define i32 @main() {
+; CHECK-LABEL: define i32 @main() {
+; CHECK-NEXT: call void @f1()
+; CHECK-NEXT: ret i32 0
+;
+ call void @f1()
+ ret i32 0
+}
+
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse_self_recursive_callee.ll b/llvm/test/Transforms/FunctionAttrs/norecurse_self_recursive_callee.ll
new file mode 100644
index 0000000..461e5df
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse_self_recursive_callee.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 5
+; RUN: opt < %s -passes=norecurse-lto-inference -S | FileCheck %s
+
+; This test includes a call graph with a self recursive function.
+; The purpose of this is to check that norecurse is added to functions
+; which have a self-recursive function in the call-chain.
+; The call-chain in this test is as follows
+; main -> bob -> callee1 -> callee2
+; where callee2 is self recursive.
+
+@x = dso_local global i32 4, align 4
+@y = dso_local global i32 2, align 4
+
+;.
+; CHECK: @x = dso_local global i32 4, align 4
+; CHECK: @y = dso_local global i32 2, align 4
+;.
+define internal void @callee2() {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define internal void @callee2(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @y, align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+; CHECK-NEXT: store volatile i32 [[INC]], ptr @y, align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load volatile i32, ptr @y, align 4
+ %inc = add nsw i32 %0, 1
+ store volatile i32 %inc, ptr @y, align 4
+ ret void
+}
+
+define internal void @callee1(i32 %x) {
+; CHECK-LABEL: define internal void @callee1(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; CHECK-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: tail call void @callee1(i32 [[X]])
+; CHECK-NEXT: br label %[[IF_END]]
+; CHECK: [[IF_END]]:
+; CHECK-NEXT: tail call void @callee2()
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp = icmp sgt i32 %x, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void @callee1(i32 %x)
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ tail call void @callee2()
+ ret void
+}
+
+define internal void @bob() {
+; CHECK-LABEL: define internal void @bob() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @x, align 4
+; CHECK-NEXT: tail call void @callee2(i32 [[TMP0]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load volatile i32, ptr @x, align 4
+ tail call void @callee2(i32 %0)
+ ret void
+}
+
+define dso_local i32 @main() norecurse {
+; CHECK: Function Attrs: norecurse
+; CHECK-LABEL: define dso_local i32 @main(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: tail call void @bob()
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @bob()
+ ret i32 0
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { norecurse }
+;.
diff --git a/llvm/test/Transforms/InstCombine/clamp-to-minmax.ll b/llvm/test/Transforms/InstCombine/clamp-to-minmax.ll
index 7f32766..0ccaa9c 100644
--- a/llvm/test/Transforms/InstCombine/clamp-to-minmax.ll
+++ b/llvm/test/Transforms/InstCombine/clamp-to-minmax.ll
@@ -172,10 +172,8 @@ define float @clamp_negative_wrong_const(float %x) {
; Like @clamp_test_1 but both are min
define float @clamp_negative_same_op(float %x) {
; CHECK-LABEL: @clamp_negative_same_op(
-; CHECK-NEXT: [[INNER_CMP_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
-; CHECK-NEXT: [[INNER_SEL:%.*]] = select nnan ninf i1 [[INNER_CMP_INV]], float 2.550000e+02, float [[X]]
-; CHECK-NEXT: [[OUTER_CMP:%.*]] = fcmp fast ult float [[X]], 1.000000e+00
-; CHECK-NEXT: [[R:%.*]] = select i1 [[OUTER_CMP]], float [[INNER_SEL]], float 1.000000e+00
+; CHECK-NEXT: [[OUTER_CMP_INV:%.*]] = fcmp fast oge float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select nnan ninf i1 [[OUTER_CMP_INV]], float 1.000000e+00, float [[X]]
; CHECK-NEXT: ret float [[R]]
;
%inner_cmp = fcmp fast ult float %x, 255.0
diff --git a/llvm/test/Transforms/InstCombine/select-gep.ll b/llvm/test/Transforms/InstCombine/select-gep.ll
index dd8dffb..7181336 100644
--- a/llvm/test/Transforms/InstCombine/select-gep.ll
+++ b/llvm/test/Transforms/InstCombine/select-gep.ll
@@ -286,3 +286,35 @@ define <2 x ptr> @test7(<2 x ptr> %p1, i64 %idx, <2 x i1> %cc) {
%select = select <2 x i1> %cc, <2 x ptr> %p1, <2 x ptr> %gep
ret <2 x ptr> %select
}
+
+define ptr @ptr_eq_replace_freeze1(ptr %p, ptr %q) {
+; CHECK-LABEL: @ptr_eq_replace_freeze1(
+; CHECK-NEXT: [[Q_FR:%.*]] = freeze ptr [[Q:%.*]]
+; CHECK-NEXT: [[Q_FR1:%.*]] = freeze ptr [[Q1:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Q_FR]], [[Q_FR1]]
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], ptr [[Q_FR]], ptr [[Q_FR1]]
+; CHECK-NEXT: ret ptr [[SELECT]]
+;
+ %p.fr = freeze ptr %p
+ %q.fr = freeze ptr %q
+ %cmp = icmp eq ptr %p.fr, %q.fr
+ %select = select i1 %cmp, ptr %p.fr, ptr %q.fr
+ ret ptr %select
+}
+
+define ptr @ptr_eq_replace_freeze2(ptr %p, ptr %q) {
+; CHECK-LABEL: @ptr_eq_replace_freeze2(
+; CHECK-NEXT: [[P_FR:%.*]] = freeze ptr [[P:%.*]]
+; CHECK-NEXT: [[P_FR1:%.*]] = freeze ptr [[P1:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P_FR1]], [[P_FR]]
+; CHECK-NEXT: [[SELECT_V:%.*]] = select i1 [[CMP]], ptr [[P_FR1]], ptr [[P_FR]]
+; CHECK-NEXT: [[SELECT:%.*]] = getelementptr i8, ptr [[SELECT_V]], i64 16
+; CHECK-NEXT: ret ptr [[SELECT]]
+;
+ %gep1 = getelementptr i32, ptr %p, i64 4
+ %gep2 = getelementptr i32, ptr %q, i64 4
+ %cmp = icmp eq ptr %p, %q
+ %cmp.fr = freeze i1 %cmp
+ %select = select i1 %cmp.fr, ptr %gep1, ptr %gep2
+ ret ptr %select
+}
diff --git a/llvm/test/Transforms/InstCombine/select-safe-bool-transforms.ll b/llvm/test/Transforms/InstCombine/select-safe-bool-transforms.ll
index 9de9150..8b0a5ca 100644
--- a/llvm/test/Transforms/InstCombine/select-safe-bool-transforms.ll
+++ b/llvm/test/Transforms/InstCombine/select-safe-bool-transforms.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; TODO: All of these should be optimized to less than or equal to a single
@@ -7,13 +7,13 @@
; --- (A op B) op' A / (B op A) op' A ---
; (A land B) land A
-define i1 @land_land_left1(i1 %A, i1 %B) {
+define i1 @land_land_left1(i1 %A, i1 %B) !prof !0 {
; CHECK-LABEL: @land_land_left1(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false
+; CHECK-NEXT: [[C:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false, !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i1 [[C]]
;
- %c = select i1 %A, i1 %B, i1 false
- %res = select i1 %c, i1 %A, i1 false
+ %c = select i1 %A, i1 %B, i1 false, !prof !1
+ %res = select i1 %c, i1 %A, i1 false, !prof !2
ret i1 %res
}
define i1 @land_land_left2(i1 %A, i1 %B) {
@@ -157,13 +157,13 @@ define i1 @lor_band_left2(i1 %A, i1 %B) {
}
; (A lor B) lor A
-define i1 @lor_lor_left1(i1 %A, i1 %B) {
+define i1 @lor_lor_left1(i1 %A, i1 %B) !prof !0 {
; CHECK-LABEL: @lor_lor_left1(
-; CHECK-NEXT: [[C:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[B:%.*]], !prof [[PROF1]]
; CHECK-NEXT: ret i1 [[C]]
;
- %c = select i1 %A, i1 true, i1 %B
- %res = select i1 %c, i1 true, i1 %A
+ %c = select i1 %A, i1 true, i1 %B, !prof !1
+ %res = select i1 %c, i1 true, i1 %A, !prof !2
ret i1 %res
}
define i1 @lor_lor_left2(i1 %A, i1 %B) {
@@ -506,3 +506,12 @@ define <2 x i1> @PR50500_falseval(<2 x i1> %a, <2 x i1> %b) {
%r = select <2 x i1> %a, <2 x i1> %b, <2 x i1> %s
ret <2 x i1> %r
}
+
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 2, i32 3}
+!2 = !{!"branch_weights", i32 5, i32 7}
+
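+; Note: only !1 is expected to survive (checked as [[PROF1]] below); the outer
+; selects and their !prof !2 are folded away, while !0 becomes plain metadata
+; [[META0]].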
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 2, i32 3}
+;.
diff --git a/llvm/test/Transforms/InstSimplify/domcondition.ll b/llvm/test/Transforms/InstSimplify/domcondition.ll
index 43be5de..2893bb1 100644
--- a/llvm/test/Transforms/InstSimplify/domcondition.ll
+++ b/llvm/test/Transforms/InstSimplify/domcondition.ll
@@ -278,3 +278,210 @@ end:
}
declare void @foo(i32)
+
+
+define i1 @simplify_fcmp_implied_by_dom_cond_range_true(float %x) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_range_true(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, 0.0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp olt float %x, 1.0
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_in_else_implied_by_dom_cond_range_true(float %x) {
+; CHECK-LABEL: @simplify_fcmp_in_else_implied_by_dom_cond_range_true(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 true
+;
+ %cmp = fcmp olt float %x, 1.0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ ret i1 true
+
+if.else:
+ %cmp2 = fcmp uge float %x, 0.5
+ ret i1 %cmp2
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_range_false(float %x) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_range_false(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, 0.0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp ogt float %x, 1.0
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_pred_true(float %x, float %y) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_pred_true(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, %y
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp ole float %x, %y
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_pred_false(float %x, float %y) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_pred_false(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 false
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, %y
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp ogt float %x, %y
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_pred_commuted(float %x, float %y) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_pred_commuted(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, %y
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp oge float %y, %x
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+; Negative tests
+
+define i1 @simplify_fcmp_implied_by_dom_cond_wrong_range(float %x) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_wrong_range(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X]], -1.000000e+00
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, 0.0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp olt float %x, -1.0
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_range_mismatched_operand(float %x, float %y) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_range_mismatched_operand(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], 0.000000e+00
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[Y:%.*]], 1.000000e+00
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, 0.0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp olt float %y, 1.0
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_wrong_pred(float %x, float %y) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_wrong_pred(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ole float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp ole float %x, %y
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp olt float %x, %y
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
+
+define i1 @simplify_fcmp_implied_by_dom_cond_pred_mismatched_operand(float %x, float %y, float %z) {
+; CHECK-LABEL: @simplify_fcmp_implied_by_dom_cond_pred_mismatched_operand(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ole float [[X]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 false
+;
+ %cmp = fcmp olt float %x, %y
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = fcmp ole float %x, %z
+ ret i1 %cmp2
+
+if.else:
+ ret i1 false
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/neon-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/neon-inloop-reductions.ll
new file mode 100644
index 0000000..22696d0
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/neon-inloop-reductions.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -prefer-inloop-reductions -mcpu=apple-m1 -force-vector-interleave=1 -S %s | FileCheck %s
+
+target triple = "arm64-apple-macosx"
+
+define i32 @mul_used_outside_vpexpression(ptr %src.0, ptr %src.1) {
+; CHECK-LABEL: define i32 @mul_used_outside_vpexpression(
+; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ITER_CHECK:.*]]:
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[SRC_1]], i64 1
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC_0]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[TMP1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]])
+; CHECK-NEXT: [[TMP6]] = add i32 [[VEC_PHI]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP4]])
+; CHECK-NEXT: [[TMP8]] = or i32 [[VEC_PHI1]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC_0]], i64 96
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; CHECK: [[VEC_EPILOG_PH]]:
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP8]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[SRC_0]], i64 100
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[SRC_1]], i64 1
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX3:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP17:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi i32 [ [[BC_MERGE_RDX2]], %[[VEC_EPILOG_PH]] ], [ [[TMP19:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC_0]], i64 [[INDEX3]]
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i8>, ptr [[NEXT_GEP6]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <4 x i8> poison, i8 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT8]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = zext <4 x i8> [[WIDE_LOAD7]] to <4 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = zext <4 x i8> [[BROADCAST_SPLAT9]] to <4 x i32>
+; CHECK-NEXT: [[TMP15:%.*]] = mul <4 x i32> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
+; CHECK-NEXT: [[TMP17]] = add i32 [[VEC_PHI4]], [[TMP16]]
+; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP15]])
+; CHECK-NEXT: [[TMP19]] = or i32 [[VEC_PHI5]], [[TMP18]]
+; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX3]], 4
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT10]], 100
+; CHECK-NEXT: br i1 [[TMP20]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 false, label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL11:%.*]] = phi ptr [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[SRC_0]], %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX12:%.*]] = phi i32 [ [[TMP17]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP6]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP19]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL11]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[GEP_0:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RED_0:%.*]] = phi i32 [ [[BC_MERGE_RDX12]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_0_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RED_1:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_1_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_0]] = getelementptr i8, ptr [[PTR_IV]], i64 1
+; CHECK-NEXT: [[L_0:%.*]] = load i8, ptr [[PTR_IV]], align 1
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i8, ptr [[SRC_1]], i64 1
+; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_1]], align 1
+; CHECK-NEXT: [[L_0_EXT:%.*]] = zext i8 [[L_0]] to i32
+; CHECK-NEXT: [[L_1_EXT:%.*]] = zext i8 [[L_1]] to i32
+; CHECK-NEXT: [[MUL_EXT_LL:%.*]] = mul i32 [[L_0_EXT]], [[L_1_EXT]]
+; CHECK-NEXT: [[RED_1_NEXT]] = or i32 [[MUL_EXT_LL]], [[RED_1]]
+; CHECK-NEXT: [[RED_0_NEXT]] = add i32 [[MUL_EXT_LL]], [[RED_0]]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 101
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RED_1_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_1_NEXT]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP19]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RED_0_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_0_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[TMP17]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RED_1_NEXT_LCSSA]], [[RED_0_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %ptr.iv = phi ptr [ %src.0, %entry ], [ %gep.0, %loop ]
+ %red.0 = phi i32 [ 0, %entry ], [ %red.0.next, %loop ]
+ %red.1 = phi i32 [ 0, %entry ], [ %red.1.next, %loop ]
+ %gep.0 = getelementptr i8, ptr %ptr.iv, i64 1
+ %l.0 = load i8, ptr %ptr.iv, align 1
+ %gep.1 = getelementptr i8, ptr %src.1, i64 1
+ %l.1 = load i8, ptr %gep.1, align 1
+ %l.0.ext = zext i8 %l.0 to i32
+ %l.1.ext = zext i8 %l.1 to i32
+ %mul.ext.ll = mul i32 %l.0.ext, %l.1.ext
+ %red.1.next = or i32 %mul.ext.ll, %red.1
+ %red.0.next = add i32 %mul.ext.ll, %red.0
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv, 101
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %res = add i32 %red.1.next, %red.0.next
+ ret i32 %res
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr162009.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr162009.ll
new file mode 100644
index 0000000..6095b24
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr162009.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -S < %s | FileCheck %s --check-prefixes=CHECK-NO-PARTIAL-REDUCTION
+
+target triple = "aarch64"
+
+define i128 @add_reduc_i32_i128_unsupported(ptr %a, ptr %b) "target-features"="+dotprod" {
+; CHECK-NO-PARTIAL-REDUCTION-LABEL: define i128 @add_reduc_i32_i128_unsupported(
+; CHECK-NO-PARTIAL-REDUCTION-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[ENTRY:.*:]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK-NO-PARTIAL-REDUCTION: [[VECTOR_PH]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-NO-PARTIAL-REDUCTION: [[VECTOR_BODY]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[VEC_PHI:%.*]] = phi <4 x i128> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 1
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP1:%.*]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP3:%.*]] = zext <4 x i32> [[WIDE_LOAD1]] to <4 x i64>
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP4:%.*]] = mul nuw <4 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP5:%.*]] = zext <4 x i64> [[TMP4]] to <4 x i128>
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP7]] = add <4 x i128> [[VEC_PHI]], [[TMP5]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4024
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NO-PARTIAL-REDUCTION: [[MIDDLE_BLOCK]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[TMP8:%.*]] = call i128 @llvm.vector.reduce.add.v4i128(<4 x i128> [[TMP7]])
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br label %[[SCALAR_PH:.*]]
+; CHECK-NO-PARTIAL-REDUCTION: [[SCALAR_PH]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-NO-PARTIAL-REDUCTION: [[FOR_BODY]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[IV:%.*]] = phi i64 [ 4024, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[ACCUM:%.*]] = phi i128 [ [[TMP8]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[GEP_A]], align 1
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[EXT_A:%.*]] = zext i32 [[LOAD_A]] to i64
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 1
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[EXT_B:%.*]] = zext i32 [[LOAD_B]] to i64
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[MUL:%.*]] = mul nuw i64 [[EXT_A]], [[EXT_B]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[MUL_ZEXT:%.*]] = zext i64 [[MUL]] to i128
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[ADD]] = add i128 [[ACCUM]], [[MUL_ZEXT]]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4025
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_EXIT:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NO-PARTIAL-REDUCTION: [[FOR_EXIT]]:
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: [[ADD_LCSSA:%.*]] = phi i128 [ [[ADD]], %[[FOR_BODY]] ]
+; CHECK-NO-PARTIAL-REDUCTION-NEXT: ret i128 [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %accum = phi i128 [ 0, %entry ], [ %add, %for.body ]
+ %gep.a = getelementptr i32, ptr %a, i64 %iv
+ %load.a = load i32, ptr %gep.a, align 1
+ %ext.a = zext i32 %load.a to i64
+ %gep.b = getelementptr i32, ptr %b, i64 %iv
+ %load.b = load i32, ptr %gep.b, align 1
+ %ext.b = zext i32 %load.b to i64
+ %mul = mul nuw i64 %ext.a, %ext.b
+ %mul.zext = zext i64 %mul to i128
+ %add = add i128 %accum, %mul.zext
+ %iv.next = add i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 4025
+ br i1 %exitcond.not, label %for.exit, label %for.body
+
+for.exit:
+ ret i128 %add
+}
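+
+;; A sketch of why no partial reduction forms here (an assumption taken from
+;; the test name and check prefix): the accumulator is i128, wider than any
+;; accumulator the target's partial-reduction lowering (e.g. dot-product
+;; style) handles, so the vector body above keeps a plain <4 x i128> add.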
+;.
+; CHECK-NO-PARTIAL-REDUCTION: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-NO-PARTIAL-REDUCTION: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-NO-PARTIAL-REDUCTION: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-NO-PARTIAL-REDUCTION: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/ARM/replicating-load-store-costs.ll
new file mode 100644
index 0000000..fd83a01
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/ARM/replicating-load-store-costs.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+
+target triple = "armv7-unknown-linux-gnueabihf"
+
+define void @replicating_load_used_by_other_load(i32 %arg, ptr %a, i32 %b) {
+; CHECK-LABEL: define void @replicating_load_used_by_other_load(
+; CHECK-SAME: i32 [[ARG:%.*]], ptr [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[ARG]], %[[ENTRY]] ]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[IV]], 1
+; CHECK-NEXT: [[AND_1:%.*]] = and i32 [[IV]], 1
+; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 [[IV]], 2
+; CHECK-NEXT: [[SHL_2:%.*]] = shl i32 [[IV]], 1
+; CHECK-NEXT: [[AND_2:%.*]] = and i32 [[SHL_2]], 2
+; CHECK-NEXT: [[OR_1:%.*]] = or i32 [[AND_2]], [[AND_1]]
+; CHECK-NEXT: [[OR_2:%.*]] = or i32 [[OR_1]], [[SHL_1]]
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i32 [[B]], [[OR_2]]
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i32 [[XOR_1]], [[ARG]]
+; CHECK-NEXT: [[SHR_2:%.*]] = lshr i32 [[SHL_1]], 1
+; CHECK-NEXT: [[XOR_3:%.*]] = xor i32 [[SHR]], [[ARG]]
+; CHECK-NEXT: [[AND_3:%.*]] = and i32 [[XOR_3]], 1
+; CHECK-NEXT: [[AND_4:%.*]] = and i32 [[IV]], 2147483646
+; CHECK-NEXT: [[OR_3:%.*]] = or i32 [[AND_3]], [[AND_4]]
+; CHECK-NEXT: [[AND_5:%.*]] = and i32 [[IV]], 254
+; CHECK-NEXT: [[SHL_3:%.*]] = shl i32 [[OR_3]], 1
+; CHECK-NEXT: [[XOR_4:%.*]] = xor i32 [[SHL_3]], 2
+; CHECK-NEXT: [[OR_4:%.*]] = or i32 [[AND_5]], [[XOR_4]]
+; CHECK-NEXT: [[XOR_5:%.*]] = xor i32 [[SHR_2]], [[OR_4]]
+; CHECK-NEXT: [[XOR_6:%.*]] = xor i32 [[XOR_5]], [[XOR_2]]
+; CHECK-NEXT: [[AND_6:%.*]] = and i32 [[XOR_6]], 255
+; CHECK-NEXT: [[XOR_7:%.*]] = xor i32 [[AND_6]], 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[XOR_7]]
+; CHECK-NEXT: [[LD:%.*]] = load i8, ptr [[GEP]], align 1
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LD]] to i32
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr null, i32 [[ZEXT]]
+; CHECK-NEXT: store i32 0, ptr [[GEP_2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV_NEXT]], 100
+; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ %iv.next, %loop ], [ %arg, %entry ]
+ %shr = lshr i32 %iv, 1
+ %and.1 = and i32 %iv, 1
+ %shl.1 = shl i32 %iv, 2
+ %shl.2 = shl i32 %iv, 1
+ %and.2 = and i32 %shl.2, 2
+ %or.1 = or i32 %and.2, %and.1
+ %or.2 = or i32 %or.1, %shl.1
+ %xor.1 = xor i32 %b, %or.2
+ %xor.2 = xor i32 %xor.1, %arg
+ %shr.2 = lshr i32 %shl.1, 1
+ %xor.3 = xor i32 %shr, %arg
+ %and.3 = and i32 %xor.3, 1
+ %and.4 = and i32 %iv, 2147483646
+ %or.3 = or i32 %and.3, %and.4
+ %and.5 = and i32 %iv, 254
+ %shl.3 = shl i32 %or.3, 1
+ %xor.4 = xor i32 %shl.3, 2
+ %or.4 = or i32 %and.5, %xor.4
+ %xor.5 = xor i32 %shr.2, %or.4
+ %xor.6 = xor i32 %xor.5, %xor.2
+ %and.6 = and i32 %xor.6, 255
+ %xor.7 = xor i32 %and.6, 1
+ %gep = getelementptr i8, ptr %a, i32 %xor.7
+ %ld = load i8, ptr %gep, align 1
+ %zext = zext i8 %ld to i32
+ %gep.2 = getelementptr i32, ptr null, i32 %zext
+ store i32 0, ptr %gep.2, align 4
+ %iv.next = add i32 %iv, 1
+ %cmp = icmp eq i32 %iv.next, 100
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/PGOProfile/memprof.ll b/llvm/test/Transforms/PGOProfile/memprof.ll
index c69d031..f6a89a8 100644
--- a/llvm/test/Transforms/PGOProfile/memprof.ll
+++ b/llvm/test/Transforms/PGOProfile/memprof.ll
@@ -38,7 +38,7 @@
; ALL-NOT: no profile data available for function
;; Using a memprof-only profile for memprof-use should only give memprof metadata
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-print-match-info -stats 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,MEMPROFMATCHINFO,MEMPROFSTATS
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-print-match-info -stats 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,MEMPROFMATCHINFO,MEMPROFSTATS,AMBIG
; There should not be any PGO metadata
; MEMPROFONLY-NOT: !prof
@@ -51,10 +51,10 @@
;; Test the same thing but by passing the memory profile through to a default
;; pipeline via -memory-profile-file=, which should cause the necessary field
;; of the PGOOptions structure to be populated with the profile filename.
-; RUN: opt < %s -passes='default<O2>' -memory-profile-file=%t.memprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY
+; RUN: opt < %s -passes='default<O2>' -memory-profile-file=%t.memprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,AMBIG
;; Using a pgo+memprof profile for memprof-use should only give memprof metadata
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,AMBIG
;; Using a pgo-only profile for memprof-use should give an error
; RUN: not opt < %s -passes='memprof-use<profile-filename=%t.pgoprofdata>' -S 2>&1 | FileCheck %s --check-prefixes=MEMPROFWITHPGOONLY
@@ -72,7 +72,7 @@
;; Using a pgo+memprof profile for both memprof-use and pgo-instr-use should
;; give both memprof and pgo metadata.
-; RUN: opt < %s -passes='pgo-instr-use,memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-test-profile-file=%t.pgomemprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,PGO
+; RUN: opt < %s -passes='pgo-instr-use,memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-test-profile-file=%t.pgomemprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,PGO,AMBIG
;; Check that the total sizes are reported if requested. A message should be
;; emitted for the pruned context. Also check that remarks are emitted for the
@@ -108,7 +108,11 @@
;; However, with the same threshold, but hot hints not enabled, it should be
;; notcold again.
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-min-ave-lifetime-access-density-hot-threshold=0 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-min-ave-lifetime-access-density-hot-threshold=0 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,AMBIG
+
+;; Test that we don't get an ambiguous memprof attribute when
+;; -memprof-ambiguous-attributes is disabled.
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-ambiguous-attributes=false 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,NOAMBIG
; MEMPROFMATCHINFO: MemProf notcold context with id 1093248920606587996 has total profiled size 10 is matched with 1 frames
; MEMPROFMATCHINFO: MemProf notcold context with id 5725971306423925017 has total profiled size 10 is matched with 1 frames
@@ -140,7 +144,7 @@ target triple = "x86_64-unknown-linux-gnu"
; PGO: !prof
define dso_local noundef ptr @_Z3foov() #0 !dbg !10 {
entry:
- ; MEMPROF: call {{.*}} @_Znam{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
+ ; MEMPROF: call {{.*}} @_Znam{{.*}} #[[A0:[0-9]+]]{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
; MEMPROFNOCOLINFO: call {{.*}} @_Znam{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
%call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !dbg !13
ret ptr %call, !dbg !14
@@ -364,6 +368,9 @@ for.end: ; preds = %for.cond
ret i32 0, !dbg !103
}
+;; We optionally apply an "ambiguous" memprof attribute to allocations with ambiguous (mixed) profiles.
+; AMBIG: #[[A0]] = { builtin allocsize(0) "memprof"="ambiguous" }
+; NOAMBIG: #[[A0]] = { builtin allocsize(0) }
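+;; (Assumed semantics, inferred from the checks above: this allocation's
+;; matched contexts include both cold and notcold behavior, so it gets
+;; "memprof"="ambiguous" when the flag is enabled and no memprof attribute
+;; at all when it is disabled.)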
; MEMPROF: #[[A1]] = { builtin allocsize(0) "memprof"="notcold" }
; MEMPROF: #[[A2]] = { builtin allocsize(0) "memprof"="cold" }
; MEMPROF: ![[M1]] = !{![[MIB1:[0-9]+]], ![[MIB2:[0-9]+]], ![[MIB3:[0-9]+]], ![[MIB4:[0-9]+]]}
diff --git a/llvm/test/Transforms/SCCP/relax-range-checks.ll b/llvm/test/Transforms/SCCP/relax-range-checks.ll
index 90722f3..34e4813 100644
--- a/llvm/test/Transforms/SCCP/relax-range-checks.ll
+++ b/llvm/test/Transforms/SCCP/relax-range-checks.ll
@@ -89,4 +89,28 @@ define i1 @relax_range_check_multiuse(i8 range(i8 0, 5) %x) {
ret i1 %ret
}
+define i1 @range_check_to_icmp_eq1(i32 range(i32 0, 4) %x) {
+; CHECK-LABEL: define i1 @range_check_to_icmp_eq1(
+; CHECK-SAME: i32 range(i32 0, 4) [[X:%.*]]) {
+; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -3
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 3
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %off = add nsw i32 %x, -3
+ %cmp = icmp ult i32 %off, 2
+ ret i1 %cmp
+}
+
+define i1 @range_check_to_icmp_eq2(i32 range(i32 -1, 2) %x) {
+; CHECK-LABEL: define i1 @range_check_to_icmp_eq2(
+; CHECK-SAME: i32 range(i32 -1, 2) [[X:%.*]]) {
+; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %off = add nsw i32 %x, -1
+ %cmp = icmp ult i32 %off, -2
+ ret i1 %cmp
+}
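+
+;; Worked sketch of the two folds above: in @range_check_to_icmp_eq1, %x is in
+;; [0, 4), so %off = %x - 3 takes values {-3, -2, -1, 0}; only 0 of those is
+;; ult 2, hence the check is exactly %x == 3. In @range_check_to_icmp_eq2, %x
+;; is in [-1, 2), so %off = %x - 1 takes values {-2, -1, 0}; only 0 of those
+;; is ult -2 (unsigned), hence %x == 1.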
+
declare void @use(i8)
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
index 655db54..a079203 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
@@ -10,14 +10,10 @@ define void @test() {
; CHECK-NEXT: [[SUB4_I_I65_US:%.*]] = or i64 0, 1
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
-; CHECK-NEXT: [[ADD_I_I62_US:%.*]] = shl i64 0, 0
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> <i64 poison, i64 1>, i64 [[ADD_I_I62_US]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i64> zeroinitializer, [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP2]], i32 4, <2 x i1> splat (i1 true), <2 x i32> poison)
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
-; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[TMP0:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> getelementptr ([[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> <i64 0, i64 1>), i32 4, <2 x i1> splat (i1 true), <2 x i32> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[SPEC_SELECT_I_I68_US:%.*]] = select i1 false, i64 [[SUB4_I_I65_US]], i64 0
; CHECK-NEXT: br label [[BODY]]
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
index 7758596..87f2cca 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
@@ -8,8 +8,8 @@ define i32 @test() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: store i32 152, ptr @f, align 4
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD_I:%.*]] = load i32, ptr @f, align 4
-; CHECK-NEXT: [[ADD_I_I:%.*]] = shl i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], 24
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[ADD_I_I]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i32> [[TMP3]], <i32 24, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i32> <i32 83886080, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = ashr <8 x i32> [[TMP1]], splat (i32 24)
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i32> [[TMP2]], <i32 66440127, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
index 75aec45..3e0a374 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -247,32 +247,12 @@ entry:
}
define void @shl0(ptr noalias %dst, ptr noalias %src) {
-; NON-POW2-LABEL: @shl0(
-; NON-POW2-NEXT: entry:
-; NON-POW2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; NON-POW2-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; NON-POW2-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; NON-POW2-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4
-; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x i32>, ptr [[INCDEC_PTR]], align 4
-; NON-POW2-NEXT: [[TMP2:%.*]] = shl <3 x i32> [[TMP1]], <i32 1, i32 2, i32 3>
-; NON-POW2-NEXT: store <3 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
-; NON-POW2-NEXT: ret void
-;
-; POW2-ONLY-LABEL: @shl0(
-; POW2-ONLY-NEXT: entry:
-; POW2-ONLY-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; POW2-ONLY-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; POW2-ONLY-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; POW2-ONLY-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4
-; POW2-ONLY-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; POW2-ONLY-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
-; POW2-ONLY-NEXT: [[TMP2:%.*]] = shl <2 x i32> [[TMP1]], <i32 1, i32 2>
-; POW2-ONLY-NEXT: store <2 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
-; POW2-ONLY-NEXT: [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
-; POW2-ONLY-NEXT: [[SHL8:%.*]] = shl i32 [[TMP3]], 3
-; POW2-ONLY-NEXT: store i32 [[SHL8]], ptr [[INCDEC_PTR6]], align 4
-; POW2-ONLY-NEXT: ret void
+; CHECK-LABEL: @shl0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i32> [[TMP0]], <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: ret void
;
entry:
%incdec.ptr = getelementptr inbounds i32, ptr %src, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
index a5b1e9b..769b360 100644
--- a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
@@ -1,25 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
-; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X86 %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefix=AARCH64 %}
define i1 @test(i32 %0, i32 %1, i32 %p) {
-; CHECK-LABEL: define i1 @test(
-; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
-; CHECK-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]]
-; CHECK-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
-; CHECK-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]]
-; CHECK-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]]
-; CHECK-NEXT: ret i1 [[OP_RDX2]]
+; X86-LABEL: define i1 @test(
+; X86-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
+; X86-NEXT: entry:
+; X86-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; X86-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
+; X86-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer
+; X86-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
+; X86-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
+; X86-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]]
+; X86-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]]
+; X86-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
+; X86-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]]
+; X86-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
+; X86-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]]
+; X86-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]]
+; X86-NEXT: ret i1 [[OP_RDX2]]
+;
+; AARCH64-LABEL: define i1 @test(
+; AARCH64-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
+; AARCH64-NEXT: entry:
+; AARCH64-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; AARCH64-NEXT: [[SHL4:%.*]] = shl i32 0, [[TMP1]]
+; AARCH64-NEXT: [[CMP5:%.*]] = icmp slt i32 [[SHL4]], 0
+; AARCH64-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>, i32 [[TMP1]], i32 1
+; AARCH64-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 1>
+; AARCH64-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
+; AARCH64-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[P]], i32 0
+; AARCH64-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[TMP4]], [[TMP5]]
+; AARCH64-NEXT: [[TMP7:%.*]] = freeze <4 x i1> [[TMP6]]
+; AARCH64-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP7]])
+; AARCH64-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP8]], i1 true, i1 [[CMP5]]
+; AARCH64-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
+; AARCH64-NEXT: [[TMP9:%.*]] = freeze i1 [[OP_RDX]]
+; AARCH64-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP9]], i1 true, i1 [[OP_RDX1]]
+; AARCH64-NEXT: ret i1 [[OP_RDX2]]
;
entry:
%cmp1 = icmp sgt i32 %0, 0
diff --git a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
index 87d8b39..2fa36b0 100644
--- a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
+++ b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt -S -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s
; SimplifyCFG should eliminate redundant indirectbr edges.
@@ -8,7 +8,11 @@ declare void @A()
declare void @B(i32)
declare void @C()
-define void @indbrtest0(ptr %P, ptr %Q) {
+;.
+; CHECK: @anchor = constant [13 x ptr] [ptr blockaddress(@indbrtest3, %L1), ptr blockaddress(@indbrtest3, %L2), ptr inttoptr (i32 1 to ptr), ptr blockaddress(@indbrtest4, %L1), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr)]
+; CHECK: @xblkx.bbs = internal unnamed_addr constant [9 x ptr] [ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %v2j), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %xlab4x), ptr blockaddress(@indbrtest7, %v2j)]
+;.
+define void @indbrtest0(ptr %P, ptr %Q) !prof !0 {
; CHECK-LABEL: @indbrtest0(
; CHECK-NEXT: entry:
; CHECK-NEXT: store ptr blockaddress(@indbrtest0, [[BB0:%.*]]), ptr [[P:%.*]], align 8
@@ -16,7 +20,7 @@ define void @indbrtest0(ptr %P, ptr %Q) {
; CHECK-NEXT: store ptr blockaddress(@indbrtest0, [[BB2:%.*]]), ptr [[P]], align 8
; CHECK-NEXT: call void @foo()
; CHECK-NEXT: [[T:%.*]] = load ptr, ptr [[Q:%.*]], align 8
-; CHECK-NEXT: indirectbr ptr [[T]], [label [[BB0]], label [[BB1]], label %BB2]
+; CHECK-NEXT: indirectbr ptr [[T]], [label [[BB0]], label [[BB1]], label %BB2], !prof [[PROF1:![0-9]+]]
; CHECK: BB0:
; CHECK-NEXT: call void @A()
; CHECK-NEXT: br label [[BB1]]
@@ -36,7 +40,7 @@ entry:
store ptr blockaddress(@indbrtest0, %BB2), ptr %P
call void @foo()
%t = load ptr, ptr %Q
- indirectbr ptr %t, [label %BB0, label %BB1, label %BB2, label %BB0, label %BB1, label %BB2]
+ indirectbr ptr %t, [label %BB0, label %BB1, label %BB2, label %BB0, label %BB1, label %BB2], !prof !1
BB0:
call void @A()
br label %BB1
@@ -103,10 +107,10 @@ BB0:
; SimplifyCFG should turn the indirectbr into a conditional branch on the
; condition of the select.
-define void @indbrtest3(i1 %cond, ptr %address) nounwind {
+define void @indbrtest3(i1 %cond, ptr %address) nounwind !prof !0 {
; CHECK-LABEL: @indbrtest3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[L1:%.*]], label [[L2:%.*]]
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[L1:%.*]], label [[L2:%.*]], !prof [[PROF2:![0-9]+]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
; CHECK: L1:
@@ -117,8 +121,8 @@ define void @indbrtest3(i1 %cond, ptr %address) nounwind {
; CHECK-NEXT: br label [[COMMON_RET]]
;
entry:
- %indirect.goto.dest = select i1 %cond, ptr blockaddress(@indbrtest3, %L1), ptr blockaddress(@indbrtest3, %L2)
- indirectbr ptr %indirect.goto.dest, [label %L1, label %L2, label %L3]
+ %indirect.goto.dest = select i1 %cond, ptr blockaddress(@indbrtest3, %L1), ptr blockaddress(@indbrtest3, %L2), !prof !2
+ indirectbr ptr %indirect.goto.dest, [label %L1, label %L2, label %L3], !prof !3
L1:
call void @A()
@@ -385,3 +389,15 @@ declare i32 @xfunc5x()
declare i8 @xfunc7x()
declare i32 @xselectorx()
declare i32 @xactionx()
+
+!0 = !{!"function_entry_count", i32 10}
+!1 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17}
+!2 = !{!"branch_weights", i32 3, i32 5}
+!3 = !{!"branch_weights", i32 3, i32 5, i32 7}
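+
+;; Sketch of the expected weight merging: the indirectbr in @indbrtest0 lists
+;; every successor twice, so deduplicating the edges should sum the
+;; branch_weights of !1 pairwise: 3+11=14, 5+13=18, 7+17=24, which is exactly
+;; what [[PROF1]] below checks.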
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i32 10}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 14, i32 18, i32 24}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 3, i32 5}
+;.
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
index 4a457cc..a0e29dd 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
@@ -7,8 +7,7 @@ declare void @foo(i32)
define void @test(i1 %a) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: i1 [[A:%.*]]) {
-; CHECK-NEXT: [[A_OFF:%.*]] = add i1 [[A]], true
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i1 [[A_OFF]], true
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i1 [[A]], true
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -209,8 +208,7 @@ define void @test5(i8 %a) {
; CHECK-SAME: i8 [[A:%.*]]) {
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[A]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], -1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], 1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -243,8 +241,7 @@ define void @test6(i8 %a) {
; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -279,8 +276,7 @@ define void @test7(i8 %a) {
; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
index 8f2ae2d..0fc3c19 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
@@ -188,4 +188,217 @@ exit:
ret void
}
+define i32 @wrapping_known_range(i8 range(i8 0, 6) %arg) {
+; CHECK-LABEL: @wrapping_known_range(
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], 3
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 4, label %if
+ i8 5, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
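+
+;; Sketch: with %arg known to be in [0, 6), the cases {0, 4, 5} branch to %if,
+;; so the leftover values {1, 2, 3} are exactly %else; (%arg - 1) ult 3 tests
+;; membership in {1, 2, 3}, which is why the branch above takes %else when the
+;; compare is true.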
+
+define i32 @wrapping_known_range_2(i8 range(i8 0, 6) %arg) {
+; CHECK-LABEL: @wrapping_known_range_2(
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[ARG:%.*]], 1
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 2, label %if
+ i8 3, label %if
+ i8 4, label %if
+ i8 5, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i32 @wrapping_range(i8 %arg) {
+; CHECK-LABEL: @wrapping_range(
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -4
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -3, label %if
+ i8 -2, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i8 @wrapping_range_phi(i8 %arg) {
+; CHECK-LABEL: @wrapping_range_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -2
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[SWITCH]], i8 0, i8 1
+; CHECK-NEXT: ret i8 [[SPEC_SELECT]]
+;
+entry:
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i = phi i8 [ 0, %else ], [ 1, %entry ], [ 1, %entry ]
+ ret i8 %i
+
+else:
+ br label %if
+}
+
+define i32 @no_continuous_wrapping_range(i8 %arg) {
+; CHECK-LABEL: @no_continuous_wrapping_range(
+; CHECK-NEXT: switch i8 [[ARG:%.*]], label [[ELSE:%.*]] [
+; CHECK-NEXT: i8 0, label [[IF:%.*]]
+; CHECK-NEXT: i8 -3, label [[IF]]
+; CHECK-NEXT: i8 -1, label [[IF]]
+; CHECK-NEXT: ]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -3, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i32 @one_case_1(i32 %x) {
+; CHECK-LABEL: @one_case_1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 10
+; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[B]] ], [ [[TMP1:%.*]], [[A]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: a:
+; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: b:
+; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+entry:
+ switch i32 %x, label %unreachable [
+ i32 5, label %a
+ i32 6, label %a
+ i32 7, label %a
+ i32 10, label %b
+ ]
+
+unreachable:
+ unreachable
+a:
+ %0 = call i32 @f(i32 0)
+ ret i32 %0
+b:
+ %1 = call i32 @f(i32 1)
+ ret i32 %1
+}
+
+define i32 @one_case_2(i32 %x) {
+; CHECK-LABEL: @one_case_2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 5
+; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[A]] ], [ [[TMP1:%.*]], [[B]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: a:
+; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: b:
+; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+entry:
+ switch i32 %x, label %unreachable [
+ i32 5, label %a
+ i32 10, label %b
+ i32 11, label %b
+ i32 12, label %b
+ i32 13, label %b
+ ]
+
+unreachable:
+ unreachable
+a:
+ %0 = call i32 @f(i32 0)
+ ret i32 %0
+b:
+ %1 = call i32 @f(i32 1)
+ ret i32 %1
+}
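+
+;; Sketch for the two @one_case tests above: since the default is unreachable,
+;; the switch only has to separate its live cases, and whichever destination
+;; is reached by a single value (10 in @one_case_1, 5 in @one_case_2) becomes
+;; an icmp eq, with every other value falling through to the other block.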
+
declare void @bar(ptr nonnull dereferenceable(4))
diff --git a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
new file mode 100644
index 0000000..c8a5746
--- /dev/null
+++ b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
@@ -0,0 +1,10 @@
+REQUIRES: aarch64-registered-target
+// Flaky on SVE buildbots; disabled pending investigation.
+UNSUPPORTED: target={{.*}}
+
+RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%d --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1
+RUN: llvm-objdump -d %d > %t.s
+RUN: FileCheck %s < %t.s
+
+CHECK-NOT: ld{{[1-4]}}
+CHECK-NOT: st{{[1-4]}}
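+
+// What the CHECK-NOT lines verify (a sketch): FMOVWSr is a register-to-register
+// move, so the disassembled benchmark should contain no ld1-ld4 or st1-st4
+// instructions, i.e. llvm-exegesis did not materialize aliasing memory accesses
+// around the measured opcode.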
diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
index d777d31..8e0d47e 100644
--- a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 8 1.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 1 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
index 99b88fe..f6be964 100644
--- a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 8 1.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 1 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
index 08f07dc..5c987ee 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 10 1.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 3 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
index 0194303..023026b 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 10 1.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 3 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
index ed8a417..db1f9af 100644
--- a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 6 14 2.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 2 6 2.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
index 3db09bc..9277a91 100644
--- a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 6 14 2.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 2 6 2.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 3 10 2.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 3 10 2.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 3 10 2.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 3 10 2.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
index 594518d..88e140d 100644
--- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 8 0.50 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 5 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 5 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 5 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
index 7b9c2516..325835a 100644
--- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 8 0.50 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 4 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 1 0.50 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 4 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 1 0.50 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 4 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}