Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/fround.ll24
-rw-r--r--llvm/test/Analysis/ScalarEvolution/zext-add.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir5
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll110
-rw-r--r--llvm/test/CodeGen/AArch64/adc.ll49
-rw-r--r--llvm/test/CodeGen/AArch64/addcarry-crash.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vabs.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/cmp-to-cmn.ll437
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll55
-rw-r--r--llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll52
-rw-r--r--llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll91
-rw-r--r--llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll77
-rw-r--r--llvm/test/CodeGen/AArch64/neon-dotreduce.ll4360
-rw-r--r--llvm/test/CodeGen/AArch64/neon-extmul.ll108
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-and-tst.ll275
-rw-r--r--llvm/test/CodeGen/AArch64/sve-vector-interleave.ll103
-rw-r--r--llvm/test/CodeGen/AArch64/sve-vscale-combine.ll105
-rw-r--r--llvm/test/CodeGen/AArch64/tbl-loops.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-add.ll337
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir1254
-rw-r--r--llvm/test/CodeGen/AMDGPU/add-max.ll62
-rw-r--r--llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/fdiv.f16.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll1783
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.ll633
-rw-r--r--llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll98
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll184
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll539
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll64
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll164
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp10.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log10.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/memmove-var-size.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/rcp-pattern.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f32.ll90
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f64.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/udivrem24.ll1984
-rw-r--r--llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll29
-rw-r--r--llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/ARM/nop_concat_vectors.ll8
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll12
-rw-r--r--llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll91
-rw-r--r--llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/NVPTX/fold-movs.ll38
-rw-r--r--llvm/test/CodeGen/NVPTX/i8x4-instructions.ll168
-rw-r--r--llvm/test/CodeGen/NVPTX/ld-param-sink.ll47
-rw-r--r--llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll347
-rw-r--r--llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll5
-rw-r--r--llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll33
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/memset-inline.ll122
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/memset-inline.ll126
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll46
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir16
-rw-r--r--llvm/test/CodeGen/RISCV/zilsd.ll19
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll42
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/logical-struct-access.ll3
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll46
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll54
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll75
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll104
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll19
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-mul-07.ll114
-rw-r--r--llvm/test/CodeGen/WebAssembly/target-features-cpus.ll4
-rw-r--r--llvm/test/CodeGen/X86/apx/cf.ll35
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-assembly.ll43
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-tailcall.ll34
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section.ll38
-rw-r--r--llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir (renamed from llvm/test/CodeGen/X86/tail-dup-computed-goto.mir)44
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll16
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/globals.ll24
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s15
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s505
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s505
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s308
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s308
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s220
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s220
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s75
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s20
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt542
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt249
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt183
-rw-r--r--llvm/test/MC/ELF/many-instructions.s10
-rw-r--r--llvm/test/MC/X86/verify-callgraph-section.s58
-rw-r--r--llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll14
-rw-r--r--llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll222
-rw-r--r--llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll10
-rw-r--r--llvm/test/Transforms/IndVarSimplify/zext-nuw.ll6
-rw-r--r--llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll24
-rw-r--r--llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll182
-rw-r--r--llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/gepofconstgepi8.ll180
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll96
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-custom-dl.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-gep.ll7
-rw-r--r--llvm/test/Transforms/InstCombine/indexed-gep-compares.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/known-phi-recurse.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/load-cmp.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll55
-rw-r--r--llvm/test/Transforms/InstCombine/pr39908.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll36
-rw-r--r--llvm/test/Transforms/InstCombine/sub-gep.ll2
-rw-r--r--llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll48
-rw-r--r--llvm/test/Transforms/LICM/gep-reassociate.ll53
-rw-r--r--llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll5
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll8
-rw-r--r--llvm/test/Transforms/LoopUnroll/AArch64/vector.ll194
-rw-r--r--llvm/test/Transforms/LoopUnroll/RISCV/vector.ll603
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll62
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/f16.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll59
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll1481
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll729
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll634
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll54
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll27
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll328
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll27
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/intrinsic.ll201
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll38
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop.ll54
-rw-r--r--llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll21
-rw-r--r--llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml8
-rw-r--r--llvm/test/Transforms/LowerTypeTests/export-alias.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll42
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll279
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll280
-rw-r--r--llvm/test/Transforms/Scalarizer/intrinsics.ll47
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll6
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll6
-rw-r--r--llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll195
-rw-r--r--llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll95
-rw-r--r--llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll13
-rw-r--r--llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll889
-rw-r--r--llvm/test/lit.cfg.py13
-rw-r--r--llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test778
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexebin1611256 -> 1666656 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofrawbin75792 -> 20256 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic.memprofexebin1604896 -> 1660336 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic.memprofrawbin1152 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexebin0 -> 1604896 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofrawbin0 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/buildid.memprofexebin1604904 -> 1660336 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/buildid.memprofrawbin1152 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/inline.memprofexebin1605480 -> 1660912 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/inline.memprofrawbin976 -> 976 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/multi.memprofexebin1604912 -> 1660352 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/multi.memprofrawbin1920 -> 1920 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexebin1606576 -> 1661960 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofrawbin74952 -> 19608 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/pic.memprofexebin1607856 -> 1663288 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/pic.memprofrawbin1152 -> 1152 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic-histogram.test4
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic.test4
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic_v4.test102
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-inline.test2
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-multi.test2
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-padding-histogram.test154
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-pic.test4
-rw-r--r--llvm/test/tools/obj2yaml/ELF/eflags.yaml31
-rw-r--r--llvm/test/tools/yaml2obj/file-header-flags.yaml25
242 files changed, 20375 insertions, 8131 deletions
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index 189e57e..23572898 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -446,13 +446,13 @@ define void @lrint() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lrint.i32.f64(double poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lrint.v2i32.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call i32 @llvm.lrint.i32.bf16(bfloat poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %32 = call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> poison)
@@ -708,13 +708,13 @@ define void @lround() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lround.i32.f64(double poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f64(<vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call i64 @llvm.lround.i64.bf16(bfloat poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison)
diff --git a/llvm/test/Analysis/ScalarEvolution/zext-add.ll b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
index 3290ee2..a08feef 100644
--- a/llvm/test/Analysis/ScalarEvolution/zext-add.ll
+++ b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
@@ -17,7 +17,7 @@ define void @test_push_constant_into_zext(ptr %dst, ptr %src, i32 %n, i64 %offse
; CHECK-NEXT: %l = load i8, ptr %outer.ptr, align 1
; CHECK-NEXT: --> %l U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner.loop: Variant, %outer.loop: Variant }
; CHECK-NEXT: %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 %offset
-; CHECK-NEXT: --> {(%offset + %src),+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((1 + (zext i32 (-1 + (1 smax %n))<nsw> to i64))<nuw><nsw> * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: --> {(%offset + %src),+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((zext i32 (1 smax %n) to i64) * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
; CHECK-NEXT: %iv.next = add i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%inner.loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (1 smax %n) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
; CHECK-NEXT: Determining loop execution counts for: @test_push_constant_into_zext
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
index 09e5a15..a422f60 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
@@ -667,11 +667,10 @@ body: |
; SELECT-NEXT: {{ $}}
; SELECT-NEXT: %zero:gpr64 = COPY $xzr
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
- ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
+ ; SELECT-NEXT: %cmp_lhs:gpr64 = SUBSXrs %zero, %reg0, 63, implicit-def dead $nzcv
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
- ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
- ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
+ ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_lhs, %sext_in_reg, 131, implicit-def $nzcv
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; SELECT-NEXT: $w0 = COPY %cmp
; SELECT-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
index 578038b..d9cdac4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -1,8 +1,8 @@
; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-NoAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"
@@ -38,24 +38,12 @@ if.end: ; preds = %if.then, %entry
; CHECK-NOT: madd
; CHECK:ldr
-; CHECK-NoAA-LABEL: @test_GEP_CSE(
-; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint ptr %string to i64
-; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
-; CHECK-NoAA: add i64 [[PTR2]], 23052
-; CHECK-NoAA: inttoptr
-; CHECK-NoAA: if.then:
-; CHECK-NoAA-NOT: ptrtoint
-; CHECK-NoAA-NOT: mul
-; CHECK-NoAA: add i64 [[PTR2]], 23048
-; CHECK-NoAA: inttoptr
-
-; CHECK-UseAA-LABEL: @test_GEP_CSE(
-; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23052
-; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23048
+; CHECK-IR-LABEL: @test_GEP_CSE(
+; CHECK-IR: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-IR: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23052
+; CHECK-IR: if.then:
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23048
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
%struct.pt = type { ptr, i32, i32 }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
index f7e16b8..9947fba 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
@@ -38,14 +38,12 @@ define <16 x i32> @mul_i32(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v4.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v3.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v3.8h
-; CHECK-GI-NEXT: umull v2.4s, v4.4h, v5.4h
-; CHECK-GI-NEXT: umull2 v3.4s, v4.8h, v5.8h
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v3.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v0.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -75,26 +73,20 @@ define <16 x i64> @mul_i64(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v4.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v2.8h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v16.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v17.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v4.2s, v2.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v4.4s, v2.4s
-; CHECK-GI-NEXT: umull v2.2d, v5.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v5.4s, v3.4s
-; CHECK-GI-NEXT: umull v4.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umull2 v5.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umull v6.2d, v16.2s, v17.2s
-; CHECK-GI-NEXT: umull2 v7.2d, v16.4s, v17.4s
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v5.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v7.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
+; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0
+; CHECK-GI-NEXT: ushll2 v5.2d, v5.4s, #0
+; CHECK-GI-NEXT: ushll v6.2d, v7.2s, #0
+; CHECK-GI-NEXT: ushll2 v7.2d, v7.4s, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
@@ -142,18 +134,12 @@ define <16 x i32> @mla_i32(<16 x i8> %a, <16 x i8> %b, <16 x i32> %c) {
;
; CHECK-GI-LABEL: mla_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v6.4h, v7.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v6.8h, v7.8h
-; CHECK-GI-NEXT: umlal v4.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v5.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v6.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v7.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v6.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v6.8h
+; CHECK-GI-NEXT: uaddw v2.4s, v4.4s, v7.4h
+; CHECK-GI-NEXT: uaddw2 v3.4s, v5.4s, v7.8h
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -186,35 +172,21 @@ define <16 x i64> @mla_i64(<16 x i8> %a, <16 x i8> %b, <16 x i64> %c) {
;
; CHECK-GI-LABEL: mla_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov v16.16b, v2.16b
-; CHECK-GI-NEXT: mov v17.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
-; CHECK-GI-NEXT: mov v4.16b, v6.16b
-; CHECK-GI-NEXT: mov v5.16b, v7.16b
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v18.4s, v6.4h, #0
-; CHECK-GI-NEXT: ushll v20.4s, v7.4h, #0
-; CHECK-GI-NEXT: ushll2 v19.4s, v6.8h, #0
-; CHECK-GI-NEXT: ushll v21.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v22.4s, v7.8h, #0
-; CHECK-GI-NEXT: ushll v23.4s, v1.4h, #0
-; CHECK-GI-NEXT: ldp q6, q7, [sp]
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v16.2d, v18.2s, v20.2s
-; CHECK-GI-NEXT: umlal2 v17.2d, v18.4s, v20.4s
-; CHECK-GI-NEXT: umlal v2.2d, v19.2s, v22.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v19.4s, v22.4s
-; CHECK-GI-NEXT: umlal v4.2d, v21.2s, v23.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v21.4s, v23.4s
-; CHECK-GI-NEXT: umlal v6.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v7.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v16.16b
-; CHECK-GI-NEXT: mov v1.16b, v17.16b
+; CHECK-GI-NEXT: umull v16.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ldp q19, q20, [sp]
+; CHECK-GI-NEXT: ushll v1.4s, v16.4h, #0
+; CHECK-GI-NEXT: ushll2 v16.4s, v16.8h, #0
+; CHECK-GI-NEXT: ushll v17.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v18.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v16.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v16.4s
+; CHECK-GI-NEXT: uaddw v4.2d, v6.2d, v17.2s
+; CHECK-GI-NEXT: uaddw2 v5.2d, v7.2d, v17.4s
+; CHECK-GI-NEXT: uaddw v6.2d, v19.2d, v18.2s
+; CHECK-GI-NEXT: uaddw2 v7.2d, v20.2d, v18.4s
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index 4b1393f..12e8bf2 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefixes=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefixes=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck --check-prefixes=CHECK-GI %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-LE-LABEL: test_simple:
@@ -18,11 +19,16 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-BE-NEXT: subs x1, x8, x5
; CHECK-BE-NEXT: sbc x0, x9, x4
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_simple:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x8, x0, x2
+; CHECK-GI-NEXT: adc x9, x1, x3
+; CHECK-GI-NEXT: subs x0, x8, x4
+; CHECK-GI-NEXT: sbc x1, x9, x5
+; CHECK-GI-NEXT: ret
%valadd = add i128 %a, %b
-
%valsub = sub i128 %valadd, %c
-
ret i128 %valsub
}
@@ -38,9 +44,13 @@ define i128 @test_imm(i128 %a) {
; CHECK-BE-NEXT: adds x1, x1, #12
; CHECK-BE-NEXT: cinc x0, x0, hs
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_imm:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x0, x0, #12
+; CHECK-GI-NEXT: adc x1, x1, xzr
+; CHECK-GI-NEXT: ret
%val = add i128 %a, 12
-
ret i128 %val
}
@@ -58,11 +68,16 @@ define i128 @test_shifted(i128 %a, i128 %b) {
; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_shifted:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: lsr x8, x2, #19
+; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45
+; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%rhs = shl i128 %b, 45
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
@@ -86,11 +101,19 @@ define i128 @test_extended(i128 %a, i16 %b) {
; CHECK-BE-NEXT: extr x8, x9, x8, #61
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_extended:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: ; kill: def $w2 killed $w2 def $x2
+; CHECK-GI-NEXT: sxth x8, w2
+; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3
+; CHECK-GI-NEXT: asr x9, x8, #63
+; CHECK-GI-NEXT: lsr x8, x8, #61
+; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%ext = sext i16 %b to i128
%rhs = shl i128 %ext, 3
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index be75ab1..b4556c7 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -1,16 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
target triple = "arm64-apple-ios7.0"
define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
-; CHECK-LABEL: foo:
-; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: lsr x8, x1, #32
-; CHECK-NEXT: ldr w9, [x0, #4]
-; CHECK-NEXT: cmn x3, x2
-; CHECK-NEXT: umull x8, w9, w8
-; CHECK-NEXT: cinc x0, x8, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: lsr x8, x1, #32
+; CHECK-SD-NEXT: ldr w9, [x0, #4]
+; CHECK-SD-NEXT: cmn x3, x2
+; CHECK-SD-NEXT: umull x8, w9, w8
+; CHECK-SD-NEXT: cinc x0, x8, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: lsr x9, x1, #32
+; CHECK-GI-NEXT: cmn x3, x2
+; CHECK-GI-NEXT: cset w10, hs
+; CHECK-GI-NEXT: lsr x8, x8, #32
+; CHECK-GI-NEXT: and x10, x10, #0x1
+; CHECK-GI-NEXT: umaddl x0, w8, w9, x10
+; CHECK-GI-NEXT: ret
entry:
%0 = lshr i64 %a, 32
%1 = load i64, ptr %ptr, align 8
@@ -24,3 +37,6 @@ entry:
}
attributes #0 = { norecurse nounwind readonly }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index b325851..78881c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 634d1b9..5f5b27a 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -59,37 +59,33 @@ bb27: ; preds = %bb9, %bb8
define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-LABEL: avoid_promotion_2_and:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: add x8, x0, #32
-; CHECK-NEXT: b LBB1_2
-; CHECK-NEXT: LBB1_1: ; %latch
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: cmp w9, #2
-; CHECK-NEXT: add x8, x8, #56
-; CHECK-NEXT: b.ls LBB1_4
-; CHECK-NEXT: LBB1_2: ; %loop
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, #32
+; CHECK-NEXT: LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr w9, [x8, #20]
-; CHECK-NEXT: cmp w9, #3
-; CHECK-NEXT: b.lo LBB1_1
-; CHECK-NEXT: ; %bb.3: ; %then
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: ldp w13, w12, [x8, #12]
-; CHECK-NEXT: ldr w10, [x8]
+; CHECK-NEXT: ldr w10, [x9, #20]
+; CHECK-NEXT: cmp w10, #3
+; CHECK-NEXT: b.lo LBB1_3
+; CHECK-NEXT: ; %bb.2: ; %then
+; CHECK-NEXT: ; in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT: ldp w13, w12, [x9, #12]
+; CHECK-NEXT: ldr w10, [x9]
; CHECK-NEXT: ldr x11, [x0]
-; CHECK-NEXT: ldr w14, [x8, #8]
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: ldr w14, [x9, #8]
; CHECK-NEXT: lsl w10, w10, w13
; CHECK-NEXT: ldrb w11, [x11, x12]
; CHECK-NEXT: eor w10, w10, w11
-; CHECK-NEXT: ldur w11, [x8, #-24]
+; CHECK-NEXT: ldur w11, [x9, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x14, x13, [x8, #-16]
-; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: ldp x14, x13, [x9, #-16]
+; CHECK-NEXT: str w10, [x9], #56
; CHECK-NEXT: and w11, w11, w12
; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
-; CHECK-NEXT: LBB1_4: ; %exit
+; CHECK-NEXT: LBB1_3: ; %exit.critedge
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..c4c54175
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..b47607e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..94b657c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index 5765e0a..b3ce9d2 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -1,14 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64"
define i1 @test_EQ_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp eq i64 %add, %a
@@ -16,11 +23,19 @@ entry:
}
define i1 @test_EQ_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -55,11 +70,19 @@ entry:
}
define i1 @test_EQ_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -68,11 +91,17 @@ entry:
}
define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp eq i32 %add, %a
@@ -218,11 +247,17 @@ entry:
}
define i1 @test_NE_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp ne i64 %add, %a
@@ -230,11 +265,19 @@ entry:
}
define i1 @test_NE_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -269,11 +312,19 @@ entry:
}
define i1 @test_NE_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -282,11 +333,17 @@ entry:
}
define i1 @test_NE_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp ne i32 %add, %a
@@ -444,161 +501,281 @@ define i1 @cmn_large_imm(i32 %a) {
}
define i1 @almost_immediate_neg_slt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_slt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i64 %x, -16773121
ret i1 %cmp
}
@@ -637,6 +814,24 @@ define i1 @cmn_nsw_neg(i32 %a, i32 %b) {
ret i1 %cmp
}
+define i1 @cmn_swap(i32 %a, i32 %b) {
+; CHECK-SD-LABEL: cmn_swap:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmn_swap:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
+ %sub = sub nsw i32 0, %b
+ %cmp = icmp sgt i32 %sub, %a
+ ret i1 %cmp
+}
+
define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) {
; CHECK-LABEL: cmn_nsw_neg_64:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 880bd29..d67aa08 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -14,20 +14,19 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
+; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: zip1 p1.d, p1.d, p1.d
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x1, #1, mul vl]
@@ -39,14 +38,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x11, x8
; CHECK-NEXT: add x11, x11, x9
; CHECK-NEXT: b.mi .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -111,21 +110,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB1_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p0/z, [x2, x8, lsl #2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
; CHECK-NEXT: cmp x10, x8
@@ -141,12 +139,12 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: b.ne .LBB1_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -213,21 +211,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_x2_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: cnth x11
; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cnth x11
; CHECK-NEXT: mov x12, x9
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p1/z, [x2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x2, x2, x11
; CHECK-NEXT: and z2.d, z2.d, #0xffffffff
; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
@@ -243,14 +240,14 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x12, x8
; CHECK-NEXT: add x12, x12, x9
; CHECK-NEXT: b.mi .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 29be231..0646ca4 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -14,15 +14,14 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x8
-; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z2, [x0, #1, mul vl]
@@ -32,14 +31,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z5, [x1]
; CHECK-NEXT: add x1, x1, x10
; CHECK-NEXT: add x0, x0, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: b.ne .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -183,17 +182,16 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64_unrolled:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntw x8
-; CHECK-NEXT: mov w10, #1000 // =0x3e8
+; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #1000 // =0x3e8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #4
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
-; CHECK-NEXT: mov z2.d, z1.d
-; CHECK-NEXT: mov z3.d, z0.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z4, [x0, #1, mul vl]
@@ -207,20 +205,20 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z18, [x1, #3, mul vl]
; CHECK-NEXT: ldr z19, [x1, #2, mul vl]
; CHECK-NEXT: add x1, x1, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #0
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #0
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #90
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #90
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #90
; CHECK-NEXT: b.ne .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z4.d, z2.d, z3.d
-; CHECK-NEXT: uzp1 z5.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z5.d, z0.d, z1.d
; CHECK-NEXT: uzp2 z2.d, z2.d, z3.d
-; CHECK-NEXT: uzp2 z0.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: fadd z1.d, z4.d, z5.d
; CHECK-NEXT: fadd z2.d, z2.d, z0.d
; CHECK-NEXT: faddv d0, p0, z1.d
@@ -310,15 +308,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-LABEL: reduction_mix:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
+; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z2.d, z2.d
-; CHECK-NEXT: zip1 z1.d, z2.d, z2.d
; CHECK-NEXT: .LBB3_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z3, [x0]
@@ -327,13 +325,13 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-NEXT: ld1w { z5.d }, p0/z, [x3, x8, lsl #2]
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmp x10, x8
-; CHECK-NEXT: fadd z0.d, z4.d, z0.d
-; CHECK-NEXT: fadd z1.d, z3.d, z1.d
+; CHECK-NEXT: fadd z1.d, z4.d, z1.d
+; CHECK-NEXT: fadd z0.d, z3.d, z0.d
; CHECK-NEXT: add z2.d, z5.d, z2.d
; CHECK-NEXT: b.ne .LBB3_1
; CHECK-NEXT: // %bb.2: // %middle.block
-; CHECK-NEXT: uzp2 z3.d, z1.d, z0.d
-; CHECK-NEXT: uzp1 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z3.d, z0.d, z1.d
+; CHECK-NEXT: uzp1 z1.d, z0.d, z1.d
; CHECK-NEXT: uaddv d2, p0, z2.d
; CHECK-NEXT: faddv d0, p0, z3.d
; CHECK-NEXT: faddv d1, p0, z1.d
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index a9618fd..05ecc9e 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -131,18 +131,83 @@ define <4 x i64> @interleave2_v4i64(<2 x i64> %vec0, <2 x i64> %vec1) {
ret <4 x i64> %retval
}
+define <4 x i16> @interleave2_same_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.4h, #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI11_0
+; CHECK-SD-NEXT: ldr d0, [x8, :lo12:.LCPI11_0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: mov w9, #4 // =0x4
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 4))
+ ret <4 x i16> %retval
+}
-; Float declarations
-declare <4 x half> @llvm.vector.interleave2.v4f16(<2 x half>, <2 x half>)
-declare <8 x half> @llvm.vector.interleave2.v8f16(<4 x half>, <4 x half>)
-declare <16 x half> @llvm.vector.interleave2.v16f16(<8 x half>, <8 x half>)
-declare <4 x float> @llvm.vector.interleave2.v4f32(<2 x float>, <2 x float>)
-declare <8 x float> @llvm.vector.interleave2.v8f32(<4 x float>, <4 x float>)
-declare <4 x double> @llvm.vector.interleave2.v4f64(<2 x double>, <2 x double>)
-
-; Integer declarations
-declare <32 x i8> @llvm.vector.interleave2.v32i8(<16 x i8>, <16 x i8>)
-declare <16 x i16> @llvm.vector.interleave2.v16i16(<8 x i16>, <8 x i16>)
-declare <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32>, <4 x i32>)
-declare <4 x i64> @llvm.vector.interleave2.v4i64(<2 x i64>, <2 x i64>)
+define <4 x i16> @interleave2_same_nonconst_splat_v4i16(i16 %a) {
+; CHECK-SD-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.4h, w0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat, <2 x i16> %splat)
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_nonconst_splat_v4i16(i16 %a, i16 %b) {
+; CHECK-SD-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w0
+; CHECK-SD-NEXT: mov v0.h[2], w1
+; CHECK-SD-NEXT: mov v0.h[3], w1
+; CHECK-SD-NEXT: rev32 v1.4h, v0.4h
+; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: dup v1.4h, w1
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %ins1 = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <2 x i16> %ins1, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %ins2 = insertelement <2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <2 x i16> %ins2, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat1, <2 x i16> %splat2)
+ ret <4 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
index c4a027c..381904f 100644
--- a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
+++ b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
@@ -25,77 +25,58 @@ define void @test_interp(ptr %frame, ptr %dst) {
; CHECK-NEXT: adrp x21, _opcode.targets@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov x24, xzr
; CHECK-NEXT: add x8, x21, xzr, lsl #3
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: add x23, x22, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: mov w22, #1 ; =0x1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp0: ; Block address taken
; CHECK-NEXT: LBB0_1: ; %loop.header
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: mov x22, xzr
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp1: ; Block address taken
; CHECK-NEXT: LBB0_2: ; %op1.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: str xzr, [x19]
-; CHECK-NEXT: mov w8, #1 ; =0x1
+; CHECK-NEXT: Ltmp2: ; Block address taken
+; CHECK-NEXT: LBB0_3: ; %op6.bb
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
+; CHECK-NEXT: ldr x8, [x0, #8]
+; CHECK-NEXT: str x22, [x0]
+; CHECK-NEXT: ldr x8, [x8, #48]
; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x8, x21, x24, lsl #3
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp2: ; Block address taken
-; CHECK-NEXT: LBB0_3: ; %op2.bb
+; CHECK-NEXT: Ltmp3: ; Block address taken
+; CHECK-NEXT: LBB0_4: ; %op2.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp3: ; Block address taken
-; CHECK-NEXT: LBB0_4: ; %op4.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
-; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
-; CHECK-NEXT: stp x8, x9, [x20, #-8]
-; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
; CHECK-NEXT: Ltmp4: ; Block address taken
-; CHECK-NEXT: LBB0_5: ; %op5.bb
+; CHECK-NEXT: LBB0_5: ; %op4.bb
+; CHECK-NEXT: Ltmp5: ; Block address taken
+; CHECK-NEXT: LBB0_6: ; %op5.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: ldur x8, [x23, #12]
; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
+; CHECK-NEXT: add x23, x23, #20
; CHECK-NEXT: stp x8, x9, [x20, #-8]
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
-; CHECK-NEXT: Ltmp5: ; Block address taken
-; CHECK-NEXT: LBB0_6: ; %op6.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: mov w8, #1 ; =0x1
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
-; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 4f0c408..048e988 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -28,46 +28,28 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_udot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: uxtb w11, w11
-; CHECK-GI-NEXT: uxtb w13, w13
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: uaddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -128,46 +110,28 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_sdot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: sshll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: sshll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: sshll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -205,22 +169,18 @@ define i32 @test_sdot_v4i8_double(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8
;
; CHECK-GI-LABEL: test_sdot_v4i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: shl v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: shl v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: shl v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: shl v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: sshr v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: mul v1.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
-; CHECK-GI-NEXT: addv s1, v1.4s
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: shl v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: shl v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: shl v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: sshr v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mul v1.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
+; CHECK-GI-NEXT: saddlv s1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -414,31 +374,60 @@ define i32 @test_udot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: umov w8, v1.b[4]
-; CHECK-GI-NEXT: umov w9, v0.b[4]
-; CHECK-GI-NEXT: umov w10, v1.b[0]
-; CHECK-GI-NEXT: umov w12, v0.b[0]
-; CHECK-GI-NEXT: umov w11, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: umov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: umov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: umov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: umov w8, v0.h[0]
+; CHECK-GI-NEXT: umov w9, v0.h[4]
+; CHECK-GI-NEXT: umov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: umov w8, v0.h[2]
+; CHECK-GI-NEXT: umov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -511,31 +500,60 @@ define i32 @test_sdot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: smov w8, v1.b[4]
-; CHECK-GI-NEXT: smov w9, v0.b[4]
-; CHECK-GI-NEXT: smov w10, v1.b[0]
-; CHECK-GI-NEXT: smov w12, v0.b[0]
-; CHECK-GI-NEXT: smov w11, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: smov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: smov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: smov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -571,59 +589,117 @@ define i32 @test_sdot_v5i8_double(<5 x i8> %a, <5 x i8> %b, <5 x i8> %c, <5 x i8
; CHECK-GI-LABEL: test_sdot_v5i8_double:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov b17, v0.b[1]
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: smov w9, v1.b[0]
-; CHECK-GI-NEXT: smov w10, v0.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[4]
-; CHECK-GI-NEXT: smov w12, v2.b[0]
-; CHECK-GI-NEXT: smov w13, v2.b[4]
-; CHECK-GI-NEXT: smov w14, v3.b[4]
-; CHECK-GI-NEXT: smov w8, v0.b[0]
-; CHECK-GI-NEXT: smov w16, v3.b[0]
-; CHECK-GI-NEXT: smov w15, v0.b[1]
-; CHECK-GI-NEXT: fmov s5, w9
-; CHECK-GI-NEXT: mul w9, w10, w11
-; CHECK-GI-NEXT: smov w10, v1.b[1]
-; CHECK-GI-NEXT: fmov s6, w12
-; CHECK-GI-NEXT: mul w12, w13, w14
-; CHECK-GI-NEXT: smov w11, v2.b[1]
-; CHECK-GI-NEXT: smov w13, v3.b[1]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: fmov s7, w16
-; CHECK-GI-NEXT: fmov s16, w9
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: smov w14, v1.b[2]
-; CHECK-GI-NEXT: fmov s17, w12
-; CHECK-GI-NEXT: smov w9, v3.b[2]
-; CHECK-GI-NEXT: mov v5.s[1], w10
-; CHECK-GI-NEXT: mov v4.s[1], w15
-; CHECK-GI-NEXT: smov w15, v2.b[2]
-; CHECK-GI-NEXT: mov v6.s[1], w11
-; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w13
-; CHECK-GI-NEXT: smov w10, v0.b[3]
-; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: smov w11, v1.b[3]
-; CHECK-GI-NEXT: smov w12, v2.b[3]
-; CHECK-GI-NEXT: smov w13, v3.b[3]
-; CHECK-GI-NEXT: mov v5.s[2], w14
-; CHECK-GI-NEXT: mov v4.s[2], w8
-; CHECK-GI-NEXT: mov v6.s[2], w15
-; CHECK-GI-NEXT: mov v16.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], w9
-; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], w10
-; CHECK-GI-NEXT: mov v6.s[3], w12
-; CHECK-GI-NEXT: mov v16.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], w13
-; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: mla v16.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mla v17.4s, v6.4s, v7.4s
-; CHECK-GI-NEXT: addv s0, v16.4s
-; CHECK-GI-NEXT: addv s1, v17.4s
+; CHECK-GI-NEXT: fmov w11, s1
+; CHECK-GI-NEXT: mov b25, v1.b[1]
+; CHECK-GI-NEXT: mov b16, v1.b[2]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b5, v1.b[4]
+; CHECK-GI-NEXT: mov b22, v2.b[1]
+; CHECK-GI-NEXT: mov b23, v3.b[1]
+; CHECK-GI-NEXT: sxtb w9, w8
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b24, v0.b[2]
+; CHECK-GI-NEXT: fmov w8, s17
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b4, v0.b[4]
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b18, v2.b[2]
+; CHECK-GI-NEXT: mov b19, v2.b[3]
+; CHECK-GI-NEXT: mov b0, v2.b[4]
+; CHECK-GI-NEXT: fmov w9, s25
+; CHECK-GI-NEXT: fmov w12, s22
+; CHECK-GI-NEXT: sxtb w10, w8
+; CHECK-GI-NEXT: mov b21, v3.b[2]
+; CHECK-GI-NEXT: fmov w13, s23
+; CHECK-GI-NEXT: mov b20, v3.b[3]
+; CHECK-GI-NEXT: mov b17, v3.b[4]
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: mov v1.h[1], w10
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov s2, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v1.h[2], w8
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov s3, w10
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s22, w11
+; CHECK-GI-NEXT: fmov w11, s21
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w12
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: mov v22.h[1], w13
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: fmov w12, s6
+; CHECK-GI-NEXT: mov v3.h[2], w10
+; CHECK-GI-NEXT: fmov w10, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: fmov w11, s4
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v1.h[3], w12
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v1.h[4], w11
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mov v22.h[4], w10
+; CHECK-GI-NEXT: mul v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: mul v1.8h, v3.8h, v22.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w11, v0.h[1]
+; CHECK-GI-NEXT: smov w10, v1.h[0]
+; CHECK-GI-NEXT: smov w12, v1.h[4]
+; CHECK-GI-NEXT: smov w13, v1.h[1]
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v1.h[2]
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: fmov s5, w12
+; CHECK-GI-NEXT: mov v2.s[1], w11
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: smov w10, v0.h[3]
+; CHECK-GI-NEXT: smov w11, v1.h[3]
+; CHECK-GI-NEXT: mov v4.s[1], w13
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[2], w8
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v4.s[2], w9
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w10
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v4.s[3], w11
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -2303,11 +2379,14 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2316,132 +2395,282 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: umov w9, v1.b[4]
-; CHECK-GI-NEXT: umov w11, v1.b[5]
-; CHECK-GI-NEXT: umov w18, v1.b[0]
-; CHECK-GI-NEXT: umov w0, v1.b[12]
-; CHECK-GI-NEXT: umov w3, v7.b[4]
-; CHECK-GI-NEXT: umov w12, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v1.b[6]
-; CHECK-GI-NEXT: umov w1, v1.b[13]
-; CHECK-GI-NEXT: umov w4, v7.b[5]
-; CHECK-GI-NEXT: umov w15, v1.b[2]
-; CHECK-GI-NEXT: umov w8, v1.b[3]
-; CHECK-GI-NEXT: umov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: umov w14, v1.b[8]
-; CHECK-GI-NEXT: umov w17, v1.b[9]
-; CHECK-GI-NEXT: umov w10, v1.b[10]
-; CHECK-GI-NEXT: umov w9, v1.b[11]
-; CHECK-GI-NEXT: umov w5, v1.b[14]
-; CHECK-GI-NEXT: umov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: umov w7, v7.b[1]
-; CHECK-GI-NEXT: umov w18, v7.b[6]
-; CHECK-GI-NEXT: umov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: umov w12, v7.b[3]
-; CHECK-GI-NEXT: umov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: umov w13, v7.b[2]
-; CHECK-GI-NEXT: umov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: umov w23, v16.b[12]
-; CHECK-GI-NEXT: umov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: umov w6, v16.b[0]
-; CHECK-GI-NEXT: umov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: umov w16, v16.b[2]
-; CHECK-GI-NEXT: umov w15, v16.b[3]
-; CHECK-GI-NEXT: umov w22, v16.b[5]
-; CHECK-GI-NEXT: umov w5, v16.b[6]
-; CHECK-GI-NEXT: umov w18, v16.b[7]
-; CHECK-GI-NEXT: umov w19, v16.b[8]
-; CHECK-GI-NEXT: umov w7, v16.b[9]
-; CHECK-GI-NEXT: umov w24, v16.b[13]
-; CHECK-GI-NEXT: umov w1, v16.b[10]
-; CHECK-GI-NEXT: umov w17, v16.b[11]
-; CHECK-GI-NEXT: umov w20, v16.b[14]
-; CHECK-GI-NEXT: umov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: umov w21, v3.b[8]
-; CHECK-GI-NEXT: umov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: umov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: umov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: umov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: umov w24, v3.b[2]
-; CHECK-GI-NEXT: umov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: umov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b4, v2.b[1]
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[10]
+; CHECK-GI-NEXT: mov b22, v2.b[11]
+; CHECK-GI-NEXT: fmov w7, s2
+; CHECK-GI-NEXT: fmov w13, s6
+; CHECK-GI-NEXT: mov b6, v2.b[12]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: mov b7, v2.b[13]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: mov b16, v2.b[14]
+; CHECK-GI-NEXT: mov b23, v2.b[15]
+; CHECK-GI-NEXT: ldp q3, q2, [x0]
+; CHECK-GI-NEXT: fmov w26, s19
+; CHECK-GI-NEXT: fmov w19, s4
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov b4, v3.b[2]
+; CHECK-GI-NEXT: mov b5, v3.b[1]
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v1.b[1]
+; CHECK-GI-NEXT: fmov w22, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov b18, v1.b[2]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: uxtb w24, w24
+; CHECK-GI-NEXT: mov b19, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s6
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: mov b21, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s7
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b7, v1.b[5]
+; CHECK-GI-NEXT: mov b6, v3.b[3]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w12, s23
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: mov b23, v1.b[7]
+; CHECK-GI-NEXT: mov b20, v3.b[4]
+; CHECK-GI-NEXT: fmov w28, s4
+; CHECK-GI-NEXT: fmov s4, w26
+; CHECK-GI-NEXT: fmov w14, s16
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: fmov w5, s18
+; CHECK-GI-NEXT: uxtb w12, w12
+; CHECK-GI-NEXT: fmov w4, s19
+; CHECK-GI-NEXT: mov b19, v3.b[5]
+; CHECK-GI-NEXT: uxtb w28, w28
+; CHECK-GI-NEXT: fmov w3, s21
+; CHECK-GI-NEXT: mov b18, v3.b[6]
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w1, s7
+; CHECK-GI-NEXT: mov b16, v3.b[7]
+; CHECK-GI-NEXT: fmov w0, s22
+; CHECK-GI-NEXT: mov b17, v3.b[8]
+; CHECK-GI-NEXT: fmov w17, s23
+; CHECK-GI-NEXT: mov b7, v3.b[9]
+; CHECK-GI-NEXT: fmov w30, s5
+; CHECK-GI-NEXT: mov b5, v3.b[10]
+; CHECK-GI-NEXT: mov b21, v3.b[11]
+; CHECK-GI-NEXT: fmov w25, s6
+; CHECK-GI-NEXT: mov b6, v3.b[12]
+; CHECK-GI-NEXT: fmov w23, s20
+; CHECK-GI-NEXT: mov b20, v3.b[13]
+; CHECK-GI-NEXT: mov b22, v3.b[14]
+; CHECK-GI-NEXT: fmov w6, s3
+; CHECK-GI-NEXT: mov b23, v3.b[15]
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w21, s19
+; CHECK-GI-NEXT: mov b19, v2.b[1]
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: fmov w24, s6
+; CHECK-GI-NEXT: fmov w7, s16
+; CHECK-GI-NEXT: mov b16, v2.b[2]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w19
+; CHECK-GI-NEXT: uxtb w19, w29
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: uxtb w22, w6
+; CHECK-GI-NEXT: fmov s5, w8
+; CHECK-GI-NEXT: fmov w10, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: fmov w20, s18
+; CHECK-GI-NEXT: uxtb w29, w29
+; CHECK-GI-NEXT: fmov s6, w22
+; CHECK-GI-NEXT: fmov w22, s2
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v5.h[1], w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v2.b[3]
+; CHECK-GI-NEXT: mov v3.h[2], w19
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov v6.h[1], w27
+; CHECK-GI-NEXT: fmov w27, s19
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: fmov s16, w22
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: uxtb w5, w25
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: mov v3.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w4
+; CHECK-GI-NEXT: mov v6.h[2], w28
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: mov v16.h[1], w27
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov b20, v2.b[5]
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov w22, s17
+; CHECK-GI-NEXT: mov v5.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w2
+; CHECK-GI-NEXT: mov v6.h[3], w5
+; CHECK-GI-NEXT: mov b21, v2.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w18
+; CHECK-GI-NEXT: uxtb w18, w23
+; CHECK-GI-NEXT: mov v3.h[4], w13
+; CHECK-GI-NEXT: uxtb w13, w24
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w3
+; CHECK-GI-NEXT: mov v6.h[4], w18
+; CHECK-GI-NEXT: ldr w18, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w16
+; CHECK-GI-NEXT: uxtb w16, w22
+; CHECK-GI-NEXT: mov v5.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w15
+; CHECK-GI-NEXT: uxtb w18, w18
+; CHECK-GI-NEXT: mov v4.h[4], w9
+; CHECK-GI-NEXT: uxtb w9, w21
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[4], w16
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w1
+; CHECK-GI-NEXT: mov v3.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: fmov w19, s22
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: mov v5.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w0
+; CHECK-GI-NEXT: mov v4.h[5], w10
+; CHECK-GI-NEXT: uxtb w10, w20
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: mov v16.h[5], w11
+; CHECK-GI-NEXT: uxtb w8, w14
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v6.h[6], w10
+; CHECK-GI-NEXT: uxtb w10, w19
+; CHECK-GI-NEXT: fmov w6, s23
+; CHECK-GI-NEXT: mov v5.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: mov v3.h[6], w18
+; CHECK-GI-NEXT: mov v4.h[6], w8
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov v7.h[6], w10
+; CHECK-GI-NEXT: mov v16.h[6], w11
+; CHECK-GI-NEXT: uxtb w10, w6
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v6.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w17
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[7], w13
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[7], w12
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[7], w10
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: umov w8, v1.b[8]
+; CHECK-GI-NEXT: umov w9, v2.b[8]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: mul w15, w8, w9
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: umov w16, v3.h[0]
+; CHECK-GI-NEXT: umov w18, v3.h[4]
+; CHECK-GI-NEXT: umov w17, v3.h[1]
+; CHECK-GI-NEXT: umov w1, v2.h[0]
+; CHECK-GI-NEXT: umov w3, v2.h[4]
+; CHECK-GI-NEXT: umov w0, v3.h[5]
+; CHECK-GI-NEXT: umov w5, v1.h[0]
+; CHECK-GI-NEXT: umov w7, v1.h[4]
+; CHECK-GI-NEXT: umov w2, v2.h[1]
+; CHECK-GI-NEXT: umov w4, v2.h[5]
+; CHECK-GI-NEXT: umov w6, v1.h[1]
+; CHECK-GI-NEXT: umov w19, v1.h[5]
+; CHECK-GI-NEXT: umov w10, v3.h[2]
+; CHECK-GI-NEXT: umov w8, v3.h[3]
+; CHECK-GI-NEXT: umov w11, v3.h[6]
+; CHECK-GI-NEXT: umov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w16
+; CHECK-GI-NEXT: fmov s4, w18
+; CHECK-GI-NEXT: fmov s5, w1
+; CHECK-GI-NEXT: fmov s6, w3
+; CHECK-GI-NEXT: fmov s7, w5
+; CHECK-GI-NEXT: fmov s16, w7
+; CHECK-GI-NEXT: fmov s17, w15
+; CHECK-GI-NEXT: umov w12, v2.h[2]
+; CHECK-GI-NEXT: umov w13, v2.h[6]
+; CHECK-GI-NEXT: umov w14, v1.h[2]
+; CHECK-GI-NEXT: umov w16, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mov v4.s[1], w0
+; CHECK-GI-NEXT: mov v5.s[1], w2
+; CHECK-GI-NEXT: mov v6.s[1], w4
+; CHECK-GI-NEXT: mov v7.s[1], w6
+; CHECK-GI-NEXT: mov v16.s[1], w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: umov w15, v2.h[3]
+; CHECK-GI-NEXT: umov w17, v2.h[7]
+; CHECK-GI-NEXT: umov w18, v1.h[3]
+; CHECK-GI-NEXT: umov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w16
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w15
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2580,11 +2809,14 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2593,132 +2825,283 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: smov w9, v1.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[5]
-; CHECK-GI-NEXT: smov w18, v1.b[0]
-; CHECK-GI-NEXT: smov w0, v1.b[12]
-; CHECK-GI-NEXT: smov w3, v7.b[4]
-; CHECK-GI-NEXT: smov w12, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v1.b[6]
-; CHECK-GI-NEXT: smov w1, v1.b[13]
-; CHECK-GI-NEXT: smov w4, v7.b[5]
-; CHECK-GI-NEXT: smov w15, v1.b[2]
-; CHECK-GI-NEXT: smov w8, v1.b[3]
-; CHECK-GI-NEXT: smov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: smov w14, v1.b[8]
-; CHECK-GI-NEXT: smov w17, v1.b[9]
-; CHECK-GI-NEXT: smov w10, v1.b[10]
-; CHECK-GI-NEXT: smov w9, v1.b[11]
-; CHECK-GI-NEXT: smov w5, v1.b[14]
-; CHECK-GI-NEXT: smov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: smov w7, v7.b[1]
-; CHECK-GI-NEXT: smov w18, v7.b[6]
-; CHECK-GI-NEXT: smov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: smov w12, v7.b[3]
-; CHECK-GI-NEXT: smov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: smov w13, v7.b[2]
-; CHECK-GI-NEXT: smov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: smov w23, v16.b[12]
-; CHECK-GI-NEXT: smov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: smov w6, v16.b[0]
-; CHECK-GI-NEXT: smov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: smov w16, v16.b[2]
-; CHECK-GI-NEXT: smov w15, v16.b[3]
-; CHECK-GI-NEXT: smov w22, v16.b[5]
-; CHECK-GI-NEXT: smov w5, v16.b[6]
-; CHECK-GI-NEXT: smov w18, v16.b[7]
-; CHECK-GI-NEXT: smov w19, v16.b[8]
-; CHECK-GI-NEXT: smov w7, v16.b[9]
-; CHECK-GI-NEXT: smov w24, v16.b[13]
-; CHECK-GI-NEXT: smov w1, v16.b[10]
-; CHECK-GI-NEXT: smov w17, v16.b[11]
-; CHECK-GI-NEXT: smov w20, v16.b[14]
-; CHECK-GI-NEXT: smov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: smov w21, v3.b[8]
-; CHECK-GI-NEXT: smov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: smov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: smov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: smov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: smov w24, v3.b[2]
-; CHECK-GI-NEXT: smov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: smov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[15]
+; CHECK-GI-NEXT: mov b3, v2.b[1]
+; CHECK-GI-NEXT: fmov w19, s2
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: fmov w6, s5
+; CHECK-GI-NEXT: mov b5, v2.b[10]
+; CHECK-GI-NEXT: fmov w14, s6
+; CHECK-GI-NEXT: mov b6, v2.b[11]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: mov b7, v2.b[12]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: mov b16, v2.b[13]
+; CHECK-GI-NEXT: mov b18, v1.b[1]
+; CHECK-GI-NEXT: sxtb w6, w6
+; CHECK-GI-NEXT: mov b17, v2.b[14]
+; CHECK-GI-NEXT: ldp q4, q2, [x0]
+; CHECK-GI-NEXT: fmov w25, s19
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: fmov w22, s5
+; CHECK-GI-NEXT: mov b5, v1.b[2]
+; CHECK-GI-NEXT: fmov w0, s6
+; CHECK-GI-NEXT: sxtb w14, w14
+; CHECK-GI-NEXT: mov b20, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s7
+; CHECK-GI-NEXT: mov b7, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s16
+; CHECK-GI-NEXT: sxtb w25, w25
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov b16, v1.b[5]
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: sxtb w22, w22
+; CHECK-GI-NEXT: mov b6, v4.b[2]
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w0, w0
+; CHECK-GI-NEXT: mov b21, v1.b[7]
+; CHECK-GI-NEXT: mov b18, v4.b[4]
+; CHECK-GI-NEXT: fmov w7, s3
+; CHECK-GI-NEXT: mov b3, v4.b[1]
+; CHECK-GI-NEXT: fmov w12, s17
+; CHECK-GI-NEXT: fmov w5, s5
+; CHECK-GI-NEXT: mov b19, v4.b[3]
+; CHECK-GI-NEXT: fmov w4, s20
+; CHECK-GI-NEXT: fmov w3, s7
+; CHECK-GI-NEXT: sxtb w29, w7
+; CHECK-GI-NEXT: mov b17, v4.b[5]
+; CHECK-GI-NEXT: fmov w1, s16
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov b16, v4.b[6]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: mov b7, v4.b[7]
+; CHECK-GI-NEXT: fmov w17, s21
+; CHECK-GI-NEXT: mov b5, v4.b[8]
+; CHECK-GI-NEXT: mov b20, v4.b[9]
+; CHECK-GI-NEXT: fmov w27, s6
+; CHECK-GI-NEXT: mov b6, v4.b[10]
+; CHECK-GI-NEXT: mov b21, v4.b[11]
+; CHECK-GI-NEXT: fmov w21, s18
+; CHECK-GI-NEXT: mov b18, v4.b[12]
+; CHECK-GI-NEXT: mov b22, v4.b[13]
+; CHECK-GI-NEXT: mov b23, v4.b[14]
+; CHECK-GI-NEXT: fmov w10, s4
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v4.b[15]
+; CHECK-GI-NEXT: fmov s4, w25
+; CHECK-GI-NEXT: fmov w30, s3
+; CHECK-GI-NEXT: fmov s3, w28
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov w7, s7
+; CHECK-GI-NEXT: mov b7, v2.b[1]
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s1
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[1], w29
+; CHECK-GI-NEXT: fmov w29, s6
+; CHECK-GI-NEXT: fmov s6, w10
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w19, s16
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b16, v2.b[3]
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b19, v2.b[2]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov s5, w24
+; CHECK-GI-NEXT: sxtb w24, w30
+; CHECK-GI-NEXT: mov v3.h[2], w6
+; CHECK-GI-NEXT: sxtb w6, w26
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: sxtb w23, w23
+; CHECK-GI-NEXT: mov v6.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: mov v5.h[1], w6
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: fmov w20, s17
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov v3.h[3], w14
+; CHECK-GI-NEXT: sxtb w14, w2
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v7.h[1], w8
+; CHECK-GI-NEXT: fmov w8, s16
+; CHECK-GI-NEXT: fmov s16, w10
+; CHECK-GI-NEXT: mov v6.h[2], w27
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: mov v4.h[3], w0
+; CHECK-GI-NEXT: sxtb w0, w4
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b18, v2.b[5]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: mov v16.h[1], w24
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov v3.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w25
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v6.h[3], w23
+; CHECK-GI-NEXT: mov v5.h[3], w0
+; CHECK-GI-NEXT: fmov w26, s22
+; CHECK-GI-NEXT: mov b19, v2.b[6]
+; CHECK-GI-NEXT: fmov w27, s18
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w28
+; CHECK-GI-NEXT: fmov w22, s23
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: fmov w6, s24
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w11
+; CHECK-GI-NEXT: sxtb w11, w21
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[3], w8
+; CHECK-GI-NEXT: sxtb w8, w16
+; CHECK-GI-NEXT: sxtb w16, w3
+; CHECK-GI-NEXT: mov v6.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v3.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w15
+; CHECK-GI-NEXT: sxtb w15, w27
+; CHECK-GI-NEXT: mov v7.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w1
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.h[4], w16
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w12
+; CHECK-GI-NEXT: sxtb w12, w18
+; CHECK-GI-NEXT: mov v4.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w19
+; CHECK-GI-NEXT: mov v5.h[5], w14
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v16.h[5], w15
+; CHECK-GI-NEXT: mov v3.h[6], w11
+; CHECK-GI-NEXT: sxtb w11, w22
+; CHECK-GI-NEXT: mov v6.h[6], w9
+; CHECK-GI-NEXT: sxtb w9, w13
+; CHECK-GI-NEXT: sxtb w13, w24
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: fmov w11, s17
+; CHECK-GI-NEXT: mov v4.h[6], w10
+; CHECK-GI-NEXT: sxtb w10, w7
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: mov v16.h[6], w13
+; CHECK-GI-NEXT: mov v3.h[7], w8
+; CHECK-GI-NEXT: sxtb w8, w6
+; CHECK-GI-NEXT: smov w12, v1.b[8]
+; CHECK-GI-NEXT: mov v6.h[7], w10
+; CHECK-GI-NEXT: sxtb w10, w17
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[7], w9
+; CHECK-GI-NEXT: mov v7.h[7], w8
+; CHECK-GI-NEXT: smov w8, v2.b[8]
+; CHECK-GI-NEXT: mov v5.h[7], w10
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[7], w11
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul w16, w12, w8
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w0, v3.h[4]
+; CHECK-GI-NEXT: sxth w16, w16
+; CHECK-GI-NEXT: smov w2, v2.h[0]
+; CHECK-GI-NEXT: smov w4, v2.h[4]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w1, v3.h[5]
+; CHECK-GI-NEXT: smov w3, v2.h[1]
+; CHECK-GI-NEXT: smov w5, v2.h[5]
+; CHECK-GI-NEXT: smov w6, v1.h[0]
+; CHECK-GI-NEXT: smov w19, v1.h[4]
+; CHECK-GI-NEXT: smov w7, v1.h[1]
+; CHECK-GI-NEXT: smov w20, v1.h[5]
+; CHECK-GI-NEXT: smov w10, v3.h[2]
+; CHECK-GI-NEXT: smov w8, v3.h[3]
+; CHECK-GI-NEXT: smov w11, v3.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w17
+; CHECK-GI-NEXT: fmov s4, w0
+; CHECK-GI-NEXT: fmov s5, w2
+; CHECK-GI-NEXT: fmov s6, w4
+; CHECK-GI-NEXT: fmov s7, w6
+; CHECK-GI-NEXT: fmov s16, w19
+; CHECK-GI-NEXT: fmov s17, w16
+; CHECK-GI-NEXT: smov w12, v2.h[2]
+; CHECK-GI-NEXT: smov w13, v2.h[6]
+; CHECK-GI-NEXT: smov w14, v1.h[2]
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w18
+; CHECK-GI-NEXT: mov v4.s[1], w1
+; CHECK-GI-NEXT: mov v5.s[1], w3
+; CHECK-GI-NEXT: mov v6.s[1], w5
+; CHECK-GI-NEXT: mov v7.s[1], w7
+; CHECK-GI-NEXT: mov v16.s[1], w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: smov w16, v2.h[3]
+; CHECK-GI-NEXT: smov w17, v2.h[7]
+; CHECK-GI-NEXT: smov w18, v1.h[3]
+; CHECK-GI-NEXT: smov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w15
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w16
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2948,349 +3331,535 @@ define i32 @test_sdot_v25i8_double(<25 x i8> %a, <25 x i8> %b, <25 x i8> %c, <25
;
; CHECK-GI-LABEL: test_sdot_v25i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp d11, d10, [sp, #-48]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #32] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
; CHECK-GI-NEXT: .cfi_offset w29, -16
-; CHECK-GI-NEXT: .cfi_offset b8, -24
-; CHECK-GI-NEXT: .cfi_offset b9, -32
-; CHECK-GI-NEXT: .cfi_offset b10, -40
-; CHECK-GI-NEXT: .cfi_offset b11, -48
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w10, w4
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w11, w2
-; CHECK-GI-NEXT: sxtb w13, w6
-; CHECK-GI-NEXT: ldr w12, [sp, #72]
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #16]
+; CHECK-GI-NEXT: lsl w10, w1, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #24]
+; CHECK-GI-NEXT: lsl w12, w4, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #56]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #64]
; CHECK-GI-NEXT: fmov s2, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #48]
-; CHECK-GI-NEXT: fmov s4, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #80]
-; CHECK-GI-NEXT: ldr w14, [sp, #128]
-; CHECK-GI-NEXT: ldr w15, [sp, #152]
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #112]
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #32]
+; CHECK-GI-NEXT: sbfx w9, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v4.h[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #152]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: mov v2.s[1], w9
-; CHECK-GI-NEXT: sxtb w9, w5
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s3, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #88]
-; CHECK-GI-NEXT: ldr x29, [sp, #32] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v4.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #56]
-; CHECK-GI-NEXT: fmov s5, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w3
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #40]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #64]
-; CHECK-GI-NEXT: mov v5.s[1], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #104]
+; CHECK-GI-NEXT: mov v4.h[2], w10
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #160]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #96]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v4.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #120]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #112]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w11
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #48]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s3, w8
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #80]
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #168]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], w11
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: mov v5.s[2], w9
-; CHECK-GI-NEXT: sxtb w9, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #144]
-; CHECK-GI-NEXT: ldr w10, [sp, #136]
-; CHECK-GI-NEXT: fmov s6, w11
-; CHECK-GI-NEXT: sxtb w11, w7
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[4], w12
+; CHECK-GI-NEXT: lsl w12, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v3.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #88]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: lsl w8, w10, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #176]
+; CHECK-GI-NEXT: mov v2.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: fmov s6, w9
+; CHECK-GI-NEXT: sbfx w15, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w10, #8
+; CHECK-GI-NEXT: mov v3.h[2], w11
+; CHECK-GI-NEXT: sbfx w11, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #96]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[5], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #224]
+; CHECK-GI-NEXT: mov v6.h[1], w15
+; CHECK-GI-NEXT: mov v2.h[6], w11
+; CHECK-GI-NEXT: lsl w15, w7, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w12, [sp, #104]
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #216]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v2.h[7], w15
+; CHECK-GI-NEXT: lsl w15, w9, #8
+; CHECK-GI-NEXT: mov v4.h[6], w14
+; CHECK-GI-NEXT: mov v6.h[2], w10
+; CHECK-GI-NEXT: lsl w10, w13, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #288]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v3.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #192]
+; CHECK-GI-NEXT: fmov s5, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #232]
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v6.h[3], w12
+; CHECK-GI-NEXT: ldr w8, [sp, #72]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #184]
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: mov v6.s[1], w9
-; CHECK-GI-NEXT: fmov s7, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w11, [sp, #160]
-; CHECK-GI-NEXT: mov v7.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #224]
-; CHECK-GI-NEXT: mov v3.s[3], w9
-; CHECK-GI-NEXT: mov v6.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #192]
-; CHECK-GI-NEXT: fmov s16, w8
-; CHECK-GI-NEXT: fmov s18, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #168]
-; CHECK-GI-NEXT: ldr w13, [sp, #208]
-; CHECK-GI-NEXT: mov v7.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #256]
-; CHECK-GI-NEXT: ldr w8, [sp, #176]
-; CHECK-GI-NEXT: mov v16.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #200]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #232]
-; CHECK-GI-NEXT: mov v6.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #248]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #240]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v18.s[2], w14
-; CHECK-GI-NEXT: fmov s17, w10
+; CHECK-GI-NEXT: mov v5.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #280]
+; CHECK-GI-NEXT: sbfx w15, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #240]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v3.h[5], w15
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #296]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #200]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v6.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #304]
+; CHECK-GI-NEXT: ldr w11, [sp, #128]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v5.h[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #208]
+; CHECK-GI-NEXT: mov v7.h[1], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #248]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v3.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w14, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v7.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #256]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #320]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v5.h[4], w13
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #312]
+; CHECK-GI-NEXT: mov v3.h[7], w10
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v4.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[3], w14
; CHECK-GI-NEXT: ldr w14, [sp, #264]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #288]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w15, [sp, #392]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #280]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #312]
-; CHECK-GI-NEXT: mov v16.s[3], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #296]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w9, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #136]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v5.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mul v16.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v6.h[6], w11
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #328]
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #336]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: smov w9, v16.h[0]
+; CHECK-GI-NEXT: smov w15, v16.h[4]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: smov w17, v16.h[5]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mov v17.s[2], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #320]
-; CHECK-GI-NEXT: fmov s20, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #344]
-; CHECK-GI-NEXT: fmov s19, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w12, [sp, #304]
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v18.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
-; CHECK-GI-NEXT: mov v19.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #328]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s21, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #336]
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v16.h[1]
+; CHECK-GI-NEXT: mov v7.h[5], w14
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: ldr w14, [sp, #344]
+; CHECK-GI-NEXT: ldr w8, [sp, #352]
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: fmov s3, w15
+; CHECK-GI-NEXT: lsl w9, w12, #8
+; CHECK-GI-NEXT: sbfx w12, w16, #8, #8
; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v17.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #376]
-; CHECK-GI-NEXT: mov v20.s[2], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #368]
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v19.s[2], w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w14, [sp, #384]
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v20.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: mul w10, w8, w10
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: mov v19.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #416]
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: ldr w9, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #432]
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #448]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v21.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #424]
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #480]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v23.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #456]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s24, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #440]
-; CHECK-GI-NEXT: mov v25.s[1], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: smov w15, v16.h[3]
+; CHECK-GI-NEXT: mov v2.s[1], w13
+; CHECK-GI-NEXT: smov w13, v16.h[2]
+; CHECK-GI-NEXT: mov v6.h[7], w12
+; CHECK-GI-NEXT: smov w12, v16.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mul v18.8h, v4.8h, v5.8h
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w9, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #368]
+; CHECK-GI-NEXT: mov v2.s[2], w13
+; CHECK-GI-NEXT: smov w13, v16.h[7]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v3.s[2], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: mul w8, w10, w14
+; CHECK-GI-NEXT: smov w10, v18.h[0]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #376]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w12, v18.h[1]
+; CHECK-GI-NEXT: mov v7.h[7], w16
+; CHECK-GI-NEXT: mov v2.s[3], w15
+; CHECK-GI-NEXT: smov w15, v18.h[4]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v3.s[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #416]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: mov v16.h[1], w11
+; CHECK-GI-NEXT: ldr w10, [sp, #424]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #384]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: fmov s5, w15
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #432]
+; CHECK-GI-NEXT: mov v4.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[5]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v7.8h, v6.8h, v7.8h
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #392]
+; CHECK-GI-NEXT: ldr w13, [sp, #400]
+; CHECK-GI-NEXT: mov v5.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: mov v16.h[3], w14
+; CHECK-GI-NEXT: ldr w10, [sp, #440]
+; CHECK-GI-NEXT: smov w14, v18.h[6]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #456]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sxth w8, w8
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v4.s[2], w12
+; CHECK-GI-NEXT: smov w12, v18.h[3]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v17.h[2], w15
+; CHECK-GI-NEXT: mov v16.h[4], w11
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[7]
+; CHECK-GI-NEXT: ldr w15, [sp, #448]
+; CHECK-GI-NEXT: ldr w11, [sp, #408]
+; CHECK-GI-NEXT: mov v4.s[3], w12
+; CHECK-GI-NEXT: smov w12, v7.h[0]
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #480]
+; CHECK-GI-NEXT: mov v16.h[5], w9
+; CHECK-GI-NEXT: lsl w9, w13, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: mov v5.s[3], w14
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: smov w14, v7.h[1]
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: fmov s6, w12
; CHECK-GI-NEXT: ldr w12, [sp, #488]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: ldr w15, [sp, #504]
-; CHECK-GI-NEXT: ldr w11, [sp, #472]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v24.s[1], w14
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v17.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #496]
+; CHECK-GI-NEXT: fmov s18, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #552]
+; CHECK-GI-NEXT: mov v6.s[1], w14
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
; CHECK-GI-NEXT: ldr w14, [sp, #464]
-; CHECK-GI-NEXT: mov v23.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w8, [sp, #408]
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #496]
-; CHECK-GI-NEXT: mov v25.s[2], w10
+; CHECK-GI-NEXT: mov v16.h[6], w9
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v18.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v17.h[5], w15
+; CHECK-GI-NEXT: sbfx w15, w10, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
; CHECK-GI-NEXT: ldr w10, [sp, #512]
-; CHECK-GI-NEXT: sxtb w9, w14
-; CHECK-GI-NEXT: ldr w14, [sp, #520]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v22.s[1], wzr
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #528]
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w13, w15
-; CHECK-GI-NEXT: fmov s27, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w15, [sp, #552]
-; CHECK-GI-NEXT: mov v25.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #544]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #560]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s28, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #616]
-; CHECK-GI-NEXT: mov v27.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #592]
-; CHECK-GI-NEXT: ldr w15, [sp, #568]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w8, [sp, #536]
-; CHECK-GI-NEXT: ldr w10, [sp, #576]
-; CHECK-GI-NEXT: mov v28.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s30, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #600]
-; CHECK-GI-NEXT: mov v27.s[2], w9
-; CHECK-GI-NEXT: mov v29.s[1], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #608]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #632]
-; CHECK-GI-NEXT: mov v28.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #640]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v29.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #664]
-; CHECK-GI-NEXT: mov v30.s[2], w13
-; CHECK-GI-NEXT: mov v28.s[3], w14
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: ldr w15, [sp, #616]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v16.h[7], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #504]
+; CHECK-GI-NEXT: mov v18.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v17.h[6], w14
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #576]
+; CHECK-GI-NEXT: mov v19.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #624]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s20, w14
; CHECK-GI-NEXT: ldr w14, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #656]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v29.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #688]
-; CHECK-GI-NEXT: fmov s31, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: mov v30.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s8, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w14, [sp, #712]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[1], w13
+; CHECK-GI-NEXT: mov v18.h[3], w16
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v19.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v20.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #688]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v18.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #584]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: fmov s21, w14
+; CHECK-GI-NEXT: ldr w15, [sp, #640]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v20.h[2], w13
; CHECK-GI-NEXT: ldr w13, [sp, #696]
-; CHECK-GI-NEXT: mov v8.s[1], w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w9, [sp, #720]
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #776]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s10, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v22.s[2], wzr
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #704]
-; CHECK-GI-NEXT: mov v9.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #760]
-; CHECK-GI-NEXT: mov v8.s[2], w13
-; CHECK-GI-NEXT: mul w10, w10, w11
-; CHECK-GI-NEXT: mov v10.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #728]
-; CHECK-GI-NEXT: sxtb w11, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v20.4s
-; CHECK-GI-NEXT: mul v7.4s, v7.4s, v21.4s
-; CHECK-GI-NEXT: mul v18.4s, v25.4s, v30.4s
-; CHECK-GI-NEXT: mov v22.s[3], wzr
-; CHECK-GI-NEXT: fmov s11, w10
-; CHECK-GI-NEXT: mov v9.s[2], w11
-; CHECK-GI-NEXT: ldr w10, [sp, #768]
-; CHECK-GI-NEXT: mov v8.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #672]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v21.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #592]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v19.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #704]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.h[3], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #648]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v21.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #600]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v19.h[5], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #656]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v21.h[3], w10
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #608]
+; CHECK-GI-NEXT: mov v20.h[4], w15
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #528]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: mov v19.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #720]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w16, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v20.h[5], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #728]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v19.h[7], w16
+; CHECK-GI-NEXT: ldr w9, [sp, #472]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v18.h[6], w14
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v21.h[5], w13
+; CHECK-GI-NEXT: ldr w15, [sp, #672]
+; CHECK-GI-NEXT: ldr w11, [sp, #536]
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v20.h[6], w14
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v19.8h, v16.8h, v19.8h
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v21.h[6], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v7.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w12, v7.h[4]
+; CHECK-GI-NEXT: mov v17.h[7], w9
+; CHECK-GI-NEXT: mov v20.h[7], w15
+; CHECK-GI-NEXT: smov w9, v7.h[5]
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w11, v19.h[4]
+; CHECK-GI-NEXT: ldr w15, [sp, #744]
+; CHECK-GI-NEXT: mov v21.h[7], w13
+; CHECK-GI-NEXT: mov v6.s[2], w14
+; CHECK-GI-NEXT: smov w14, v19.h[0]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w13, v19.h[5]
+; CHECK-GI-NEXT: smov w12, v19.h[1]
+; CHECK-GI-NEXT: mul v20.8h, v17.8h, v20.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #544]
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: mul v22.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: fmov s18, w11
+; CHECK-GI-NEXT: mov v16.s[1], w9
+; CHECK-GI-NEXT: fmov s17, w14
+; CHECK-GI-NEXT: smov w14, v7.h[6]
+; CHECK-GI-NEXT: smov w11, v19.h[2]
+; CHECK-GI-NEXT: smov w9, v7.h[3]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mla v5.4s, v3.4s, v17.4s
-; CHECK-GI-NEXT: mov v11.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: mov v9.s[3], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mla v7.4s, v6.4s, v19.4s
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v20.4s, v26.4s, v8.4s
-; CHECK-GI-NEXT: mla v18.4s, v23.4s, v29.4s
-; CHECK-GI-NEXT: mov v31.s[3], w9
-; CHECK-GI-NEXT: add v1.4s, v22.4s, v1.4s
-; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mov v11.s[2], wzr
-; CHECK-GI-NEXT: mov v10.s[3], w8
-; CHECK-GI-NEXT: mul v21.4s, v28.4s, v9.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v7.4s, v1.4s
-; CHECK-GI-NEXT: mla v20.4s, v24.4s, v31.4s
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: mla v21.4s, v27.4s, v10.4s
+; CHECK-GI-NEXT: fmov s21, w8
+; CHECK-GI-NEXT: mov v18.s[1], w13
+; CHECK-GI-NEXT: sxtb w13, w15
+; CHECK-GI-NEXT: smov w15, v20.h[0]
+; CHECK-GI-NEXT: mov v17.s[1], w12
+; CHECK-GI-NEXT: smov w8, v7.h[7]
+; CHECK-GI-NEXT: smov w12, v19.h[6]
+; CHECK-GI-NEXT: mov v16.s[2], w14
+; CHECK-GI-NEXT: smov w14, v20.h[1]
+; CHECK-GI-NEXT: mul w10, w10, w13
+; CHECK-GI-NEXT: smov w13, v20.h[4]
+; CHECK-GI-NEXT: smov w16, v20.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
+; CHECK-GI-NEXT: fmov s7, w15
+; CHECK-GI-NEXT: smov w15, v20.h[2]
+; CHECK-GI-NEXT: mov v6.s[3], w9
+; CHECK-GI-NEXT: mov v17.s[2], w11
+; CHECK-GI-NEXT: smov w11, v22.h[0]
+; CHECK-GI-NEXT: sxth w10, w10
+; CHECK-GI-NEXT: mov v18.s[2], w12
+; CHECK-GI-NEXT: smov w12, v22.h[1]
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v7.s[1], w14
+; CHECK-GI-NEXT: smov w14, v22.h[4]
+; CHECK-GI-NEXT: fmov s23, w13
+; CHECK-GI-NEXT: smov w13, v22.h[5]
+; CHECK-GI-NEXT: fmov s26, w10
+; CHECK-GI-NEXT: smov w10, v19.h[7]
+; CHECK-GI-NEXT: fmov s24, w11
+; CHECK-GI-NEXT: smov w11, v20.h[6]
+; CHECK-GI-NEXT: mov v21.s[2], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w16
+; CHECK-GI-NEXT: add v4.4s, v6.4s, v16.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: fmov s25, w14
+; CHECK-GI-NEXT: smov w14, v22.h[2]
+; CHECK-GI-NEXT: mov v26.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], w12
+; CHECK-GI-NEXT: smov w12, v19.h[3]
+; CHECK-GI-NEXT: mov v7.s[2], w15
+; CHECK-GI-NEXT: smov w15, v20.h[3]
+; CHECK-GI-NEXT: mov v18.s[3], w10
+; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: mov v25.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[6]
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: smov w11, v20.h[7]
+; CHECK-GI-NEXT: mov v26.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], w14
+; CHECK-GI-NEXT: smov w14, v22.h[3]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: mov v7.s[3], w15
+; CHECK-GI-NEXT: add v1.4s, v21.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[2], w13
+; CHECK-GI-NEXT: smov w13, v22.h[7]
+; CHECK-GI-NEXT: mov v23.s[3], w11
+; CHECK-GI-NEXT: mov v26.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], w14
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v18.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[3], w13
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v23.4s
+; CHECK-GI-NEXT: add v0.4s, v26.4s, v0.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: add v3.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v11.4s, v0.4s
+; CHECK-GI-NEXT: add v7.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: add v3.4s, v5.4s, v6.4s
; CHECK-GI-NEXT: addv s1, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v7.4s, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: ldp d11, d10, [sp], #48 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <25 x i8> %a to <25 x i32>
@@ -3972,197 +4541,412 @@ define i32 @test_udot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s6, wzr
; CHECK-GI-NEXT: fmov s0, wzr
; CHECK-GI-NEXT: fmov s1, wzr
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: umov w8, v21.b[0]
-; CHECK-GI-NEXT: umov w9, v21.b[4]
-; CHECK-GI-NEXT: umov w10, v21.b[1]
-; CHECK-GI-NEXT: umov w13, v21.b[8]
-; CHECK-GI-NEXT: umov w11, v21.b[5]
-; CHECK-GI-NEXT: umov w14, v21.b[9]
-; CHECK-GI-NEXT: umov w15, v25.b[0]
-; CHECK-GI-NEXT: umov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: mov b29, v7.b[13]
+; CHECK-GI-NEXT: mov b30, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[7]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w13, s19
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b17, v16.b[2]
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w23, s21
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v16.b[1]
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: fmov w5, s17
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b21, v16.b[5]
+; CHECK-GI-NEXT: fmov s17, w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: fmov w20, s22
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w16
+; CHECK-GI-NEXT: mov b19, v16.b[3]
+; CHECK-GI-NEXT: mov b22, v16.b[4]
+; CHECK-GI-NEXT: mov b20, v16.b[6]
+; CHECK-GI-NEXT: fmov w21, s27
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: fmov w24, s28
+; CHECK-GI-NEXT: mov b24, v16.b[8]
+; CHECK-GI-NEXT: fmov w22, s29
+; CHECK-GI-NEXT: mov b26, v16.b[9]
+; CHECK-GI-NEXT: fmov w4, s30
+; CHECK-GI-NEXT: uxtb w10, w21
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w13
+; CHECK-GI-NEXT: str s20, [sp] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b25, v16.b[10]
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b27, v16.b[11]
+; CHECK-GI-NEXT: mov b28, v16.b[12]
+; CHECK-GI-NEXT: mov b29, v16.b[13]
+; CHECK-GI-NEXT: mov b30, v16.b[14]
+; CHECK-GI-NEXT: fmov w26, s16
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w27
+; CHECK-GI-NEXT: mov b31, v16.b[15]
+; CHECK-GI-NEXT: ldp q18, q16, [x0]
+; CHECK-GI-NEXT: fmov w2, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov v17.h[2], w9
+; CHECK-GI-NEXT: fmov w14, s22
+; CHECK-GI-NEXT: fmov w3, s25
+; CHECK-GI-NEXT: fmov w15, s19
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: uxtb w8, w23
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: mov b25, v18.b[5]
+; CHECK-GI-NEXT: mov b23, v18.b[6]
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: uxtb w3, w3
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w24
+; CHECK-GI-NEXT: uxtb w24, w7
+; CHECK-GI-NEXT: mov b19, v18.b[3]
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w20
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v18.b[10]
+; CHECK-GI-NEXT: fmov w9, s22
+; CHECK-GI-NEXT: fmov w6, s26
+; CHECK-GI-NEXT: mov v17.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w25
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b26, v18.b[4]
+; CHECK-GI-NEXT: fmov w18, s28
+; CHECK-GI-NEXT: fmov w16, s29
+; CHECK-GI-NEXT: mov v7.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s18
+; CHECK-GI-NEXT: mov b24, v18.b[7]
+; CHECK-GI-NEXT: fmov w30, s21
+; CHECK-GI-NEXT: mov b20, v18.b[8]
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: uxtb w16, w16
+; CHECK-GI-NEXT: mov b28, v18.b[11]
+; CHECK-GI-NEXT: mov b29, v18.b[12]
+; CHECK-GI-NEXT: fmov w23, s25
+; CHECK-GI-NEXT: mov b25, v18.b[13]
+; CHECK-GI-NEXT: fmov w21, s23
+; CHECK-GI-NEXT: mov v7.h[7], w24
+; CHECK-GI-NEXT: uxtb w24, w8
+; CHECK-GI-NEXT: uxtb w8, w9
+; CHECK-GI-NEXT: uxtb w9, w29
+; CHECK-GI-NEXT: mov b23, v18.b[14]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: fmov s21, w24
+; CHECK-GI-NEXT: fmov s18, w26
+; CHECK-GI-NEXT: fmov w28, s19
+; CHECK-GI-NEXT: mov b19, v16.b[1]
+; CHECK-GI-NEXT: mov v17.h[5], w22
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov v21.h[1], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov v18.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w5
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: fmov w24, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w4, s22
+; CHECK-GI-NEXT: mov b22, v16.b[8]
+; CHECK-GI-NEXT: mov v17.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s19
+; CHECK-GI-NEXT: fmov s19, w19
+; CHECK-GI-NEXT: mov v21.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w28
+; CHECK-GI-NEXT: mov v18.h[2], w10
+; CHECK-GI-NEXT: uxtb w10, w6
+; CHECK-GI-NEXT: mov b27, v16.b[9]
+; CHECK-GI-NEXT: fmov w20, s24
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b24, v16.b[3]
+; CHECK-GI-NEXT: fmov w5, s20
+; CHECK-GI-NEXT: mov v19.h[1], w10
+; CHECK-GI-NEXT: fmov w10, s23
+; CHECK-GI-NEXT: fmov s23, w7
+; CHECK-GI-NEXT: mov v21.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w7, s22
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: mov b20, v16.b[10]
+; CHECK-GI-NEXT: fmov w22, s28
+; CHECK-GI-NEXT: fmov w25, s25
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b25, v16.b[5]
+; CHECK-GI-NEXT: fmov w29, s24
+; CHECK-GI-NEXT: fmov s22, w7
+; CHECK-GI-NEXT: mov v23.h[2], w27
+; CHECK-GI-NEXT: mov b24, v16.b[11]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: fmov w26, s29
+; CHECK-GI-NEXT: mov b26, v16.b[4]
+; CHECK-GI-NEXT: mov v19.h[2], w3
+; CHECK-GI-NEXT: uxtb w3, w29
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[1], w11
+; CHECK-GI-NEXT: uxtb w11, w15
+; CHECK-GI-NEXT: uxtb w15, w22
+; CHECK-GI-NEXT: uxtb w22, w23
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: fmov w6, s25
+; CHECK-GI-NEXT: mov v18.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: mov v23.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w17
+; CHECK-GI-NEXT: uxtb w17, w21
+; CHECK-GI-NEXT: mov b25, v16.b[12]
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v21.h[5], w22
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: uxtb w11, w14
+; CHECK-GI-NEXT: uxtb w14, w26
+; CHECK-GI-NEXT: mov v20.h[2], w5
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w19, s26
+; CHECK-GI-NEXT: mov v18.h[4], w11
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v23.h[4], w14
+; CHECK-GI-NEXT: uxtb w14, w25
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b26, v16.b[13]
+; CHECK-GI-NEXT: fmov w7, s25
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w18
+; CHECK-GI-NEXT: uxtb w18, w19
+; CHECK-GI-NEXT: mov v21.h[6], w17
+; CHECK-GI-NEXT: uxtb w17, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w2
+; CHECK-GI-NEXT: mov v20.h[3], w3
+; CHECK-GI-NEXT: mov v23.h[5], w14
+; CHECK-GI-NEXT: uxtb w14, w24
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w7
+; CHECK-GI-NEXT: fmov w8, s26
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v21.h[7], w17
+; CHECK-GI-NEXT: uxtb w17, w6
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w15, w15
+; CHECK-GI-NEXT: fmov w13, s30
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v20.h[4], w18
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v19.h[5], w16
+; CHECK-GI-NEXT: mov b27, v16.b[14]
+; CHECK-GI-NEXT: mul v24.8h, v7.8h, v21.8h
+; CHECK-GI-NEXT: mov v22.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov b7, v16.b[7]
+; CHECK-GI-NEXT: mov b16, v16.b[15]
+; CHECK-GI-NEXT: fmov w12, s31
+; CHECK-GI-NEXT: mov v17.h[7], w11
+; CHECK-GI-NEXT: uxtb w11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v20.h[5], w17
+; CHECK-GI-NEXT: mov v23.h[7], w8
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov v18.h[6], w15
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov v19.h[6], w11
+; CHECK-GI-NEXT: fmov w12, s16
+; CHECK-GI-NEXT: fmov w11, s7
; CHECK-GI-NEXT: fmov s4, wzr
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v20.h[6], w10
+; CHECK-GI-NEXT: umov w10, v24.h[0]
+; CHECK-GI-NEXT: mul v21.8h, v17.8h, v23.8h
+; CHECK-GI-NEXT: mov v18.h[7], w13
; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v19.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: umov w12, v24.h[4]
+; CHECK-GI-NEXT: mov v22.h[6], w9
+; CHECK-GI-NEXT: umov w9, v24.h[1]
+; CHECK-GI-NEXT: mov v20.h[7], w11
+; CHECK-GI-NEXT: umov w11, v24.h[5]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldrb w10, [x1, #32]
+; CHECK-GI-NEXT: umov w13, v21.h[0]
+; CHECK-GI-NEXT: umov w14, v21.h[1]
+; CHECK-GI-NEXT: umov w15, v21.h[4]
; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: umov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: umov w9, v21.b[3]
-; CHECK-GI-NEXT: umov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: umov w15, v25.b[4]
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: umov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: umov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: mov v22.h[7], w8
+; CHECK-GI-NEXT: umov w12, v24.h[6]
+; CHECK-GI-NEXT: umov w8, v24.h[2]
+; CHECK-GI-NEXT: mov v7.s[1], w9
+; CHECK-GI-NEXT: ldrb w9, [x0, #32]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mul v23.8h, v18.8h, v20.8h
+; CHECK-GI-NEXT: umov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v16.s[1], w11
+; CHECK-GI-NEXT: umov w11, v21.h[5]
+; CHECK-GI-NEXT: fmov s18, w15
+; CHECK-GI-NEXT: mul v19.8h, v19.8h, v22.8h
+; CHECK-GI-NEXT: umov w15, v21.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: mov v17.s[1], w14
+; CHECK-GI-NEXT: umov w14, v21.h[2]
+; CHECK-GI-NEXT: mov v7.s[2], w8
+; CHECK-GI-NEXT: mul w8, w10, w9
+; CHECK-GI-NEXT: umov w9, v23.h[0]
+; CHECK-GI-NEXT: umov w10, v23.h[1]
+; CHECK-GI-NEXT: mov v16.s[2], w12
+; CHECK-GI-NEXT: umov w12, v21.h[3]
+; CHECK-GI-NEXT: mov v18.s[1], w11
+; CHECK-GI-NEXT: umov w11, v23.h[4]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: umov w15, v25.b[13]
+; CHECK-GI-NEXT: mov v17.s[2], w14
+; CHECK-GI-NEXT: umov w14, v23.h[5]
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: umov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: umov w8, v21.b[11]
-; CHECK-GI-NEXT: umov w10, v21.b[14]
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: umov w9, v19.h[1]
; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: umov w11, v25.b[2]
+; CHECK-GI-NEXT: mov v16.s[3], w13
+; CHECK-GI-NEXT: umov w13, v19.h[0]
+; CHECK-GI-NEXT: mov v18.s[2], w15
+; CHECK-GI-NEXT: umov w15, v21.h[7]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: umov w11, v23.h[2]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: umov w12, v19.h[4]
+; CHECK-GI-NEXT: mov v20.s[1], w10
+; CHECK-GI-NEXT: umov w10, v23.h[3]
; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: umov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: umov w14, v21.b[15]
-; CHECK-GI-NEXT: umov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: umov w13, v25.b[12]
+; CHECK-GI-NEXT: umov w16, v24.h[3]
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: umov w13, v19.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: umov w14, v23.h[6]
+; CHECK-GI-NEXT: mov v18.s[3], w15
+; CHECK-GI-NEXT: umov w15, v19.h[2]
+; CHECK-GI-NEXT: mov v20.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[6]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: umov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: umov w8, v26.b[5]
-; CHECK-GI-NEXT: umov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v22.s[1], w9
+; CHECK-GI-NEXT: umov w9, v23.h[7]
+; CHECK-GI-NEXT: fmov s23, w12
+; CHECK-GI-NEXT: umov w12, v19.h[3]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: umov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: umov w9, v25.b[10]
-; CHECK-GI-NEXT: umov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: umov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: umov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: umov w12, v26.b[4]
-; CHECK-GI-NEXT: umov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: umov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: umov w13, v26.b[9]
+; CHECK-GI-NEXT: mov v21.s[2], w14
; CHECK-GI-NEXT: mov v2.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: umov w15, v25.b[15]
+; CHECK-GI-NEXT: mov v23.s[1], w13
; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[6]
-; CHECK-GI-NEXT: umov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: umov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: umov w10, v26.b[3]
; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: umov w15, v22.b[0]
-; CHECK-GI-NEXT: umov w8, v26.b[7]
+; CHECK-GI-NEXT: mov v22.s[2], w15
+; CHECK-GI-NEXT: mov v7.s[3], w16
+; CHECK-GI-NEXT: mov v20.s[3], w10
; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: umov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: umov w10, v22.b[1]
-; CHECK-GI-NEXT: umov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: umov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: umov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: umov w15, v22.b[12]
-; CHECK-GI-NEXT: umov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[13]
+; CHECK-GI-NEXT: mov v21.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: umov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: umov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: umov w15, v26.b[15]
-; CHECK-GI-NEXT: umov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: umov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrb w14, [x1, #32]
-; CHECK-GI-NEXT: umov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[7]
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: mov v22.s[3], w12
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: umov w9, v22.b[7]
-; CHECK-GI-NEXT: umov w11, v22.b[3]
; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v7.4s, v17.4s, v18.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: umov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: add v16.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: mov v23.s[3], w11
; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: umov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: add v17.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v0.4s, v19.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4359,197 +5143,412 @@ define i32 @test_sdot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
-; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
-; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: smov w8, v21.b[0]
-; CHECK-GI-NEXT: smov w9, v21.b[4]
-; CHECK-GI-NEXT: smov w10, v21.b[1]
-; CHECK-GI-NEXT: smov w13, v21.b[8]
-; CHECK-GI-NEXT: smov w11, v21.b[5]
-; CHECK-GI-NEXT: smov w14, v21.b[9]
-; CHECK-GI-NEXT: smov w15, v25.b[0]
-; CHECK-GI-NEXT: smov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: smov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: smov w9, v21.b[3]
-; CHECK-GI-NEXT: smov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: smov w15, v25.b[4]
-; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: smov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: smov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s6, wzr
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: fmov w14, s19
+; CHECK-GI-NEXT: mov b19, v7.b[13]
+; CHECK-GI-NEXT: mov b29, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b30, v16.b[1]
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w24, s21
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w22, s22
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov b17, v16.b[3]
+; CHECK-GI-NEXT: mov b21, v16.b[4]
+; CHECK-GI-NEXT: mov b18, v16.b[5]
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: fmov w25, s27
+; CHECK-GI-NEXT: mov b22, v16.b[7]
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w16
+; CHECK-GI-NEXT: mov b25, v16.b[8]
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b24, v16.b[9]
+; CHECK-GI-NEXT: fmov w5, s29
+; CHECK-GI-NEXT: mov b26, v16.b[10]
+; CHECK-GI-NEXT: mov b19, v16.b[11]
+; CHECK-GI-NEXT: fmov w6, s30
+; CHECK-GI-NEXT: mov b27, v16.b[12]
+; CHECK-GI-NEXT: mov b28, v16.b[13]
+; CHECK-GI-NEXT: mov b29, v16.b[14]
+; CHECK-GI-NEXT: sxtb w30, w23
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: fmov w20, s16
+; CHECK-GI-NEXT: mov b30, v16.b[15]
+; CHECK-GI-NEXT: fmov s16, w8
+; CHECK-GI-NEXT: sxtb w8, w12
+; CHECK-GI-NEXT: fmov w15, s17
+; CHECK-GI-NEXT: fmov w11, s18
+; CHECK-GI-NEXT: ldp q18, q17, [x0]
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w27
+; CHECK-GI-NEXT: fmov w18, s20
+; CHECK-GI-NEXT: sxtb w15, w15
+; CHECK-GI-NEXT: mov v16.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w25
+; CHECK-GI-NEXT: mov b20, v18.b[3]
+; CHECK-GI-NEXT: fmov w2, s22
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: sxtb w18, w18
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: fmov w3, s19
+; CHECK-GI-NEXT: mov b19, v18.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w24
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w28, s20
+; CHECK-GI-NEXT: mov b20, v18.b[11]
+; CHECK-GI-NEXT: fmov w8, s22
+; CHECK-GI-NEXT: mov b25, v18.b[8]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov v7.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w22
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: mov b24, v18.b[4]
+; CHECK-GI-NEXT: mov b23, v18.b[5]
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: sxtb w23, w26
+; CHECK-GI-NEXT: mov v7.h[6], w9
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: mov b19, v18.b[14]
+; CHECK-GI-NEXT: fmov w10, s25
+; CHECK-GI-NEXT: fmov w4, s26
+; CHECK-GI-NEXT: fmov w16, s28
+; CHECK-GI-NEXT: mov b26, v18.b[7]
+; CHECK-GI-NEXT: mov b28, v18.b[10]
+; CHECK-GI-NEXT: fmov w27, s24
+; CHECK-GI-NEXT: mov b24, v18.b[12]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v7.h[7], w7
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: sxtb w4, w4
+; CHECK-GI-NEXT: fmov s20, w23
+; CHECK-GI-NEXT: fmov w25, s23
+; CHECK-GI-NEXT: mov b23, v18.b[13]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: mov v16.h[5], w30
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov b21, v17.b[1]
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: sxtb w20, w6
+; CHECK-GI-NEXT: fmov w6, s19
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov b28, v17.b[8]
+; CHECK-GI-NEXT: fmov s18, w8
+; CHECK-GI-NEXT: sxtb w8, w21
+; CHECK-GI-NEXT: mov v16.h[6], w5
+; CHECK-GI-NEXT: fmov w5, s22
+; CHECK-GI-NEXT: fmov s22, w10
+; CHECK-GI-NEXT: sxtb w10, w27
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: mov v20.h[2], w29
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: sxtb w8, w28
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: sxtb w19, w9
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: mov b25, v17.b[2]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v17.b[9]
+; CHECK-GI-NEXT: mov v22.h[1], w19
+; CHECK-GI-NEXT: fmov w23, s23
+; CHECK-GI-NEXT: mov v20.h[3], w8
+; CHECK-GI-NEXT: mov b23, v17.b[6]
+; CHECK-GI-NEXT: fmov w30, s24
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v17.b[5]
+; CHECK-GI-NEXT: mov v18.h[1], w20
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: mov b25, v17.b[10]
+; CHECK-GI-NEXT: mov v19.h[1], w28
+; CHECK-GI-NEXT: sxtb w28, w29
+; CHECK-GI-NEXT: mov v22.h[2], w26
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov v20.h[4], w10
+; CHECK-GI-NEXT: fmov w10, s28
+; CHECK-GI-NEXT: fmov s21, w27
+; CHECK-GI-NEXT: sxtb w21, w21
+; CHECK-GI-NEXT: mov b27, v17.b[3]
+; CHECK-GI-NEXT: fmov w19, s23
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: fmov w22, s26
+; CHECK-GI-NEXT: mov b26, v17.b[4]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v21.h[1], w28
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b24, v17.b[11]
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v18.h[2], w18
+; CHECK-GI-NEXT: sxtb w18, w25
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: fmov w20, s27
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w9, s26
+; CHECK-GI-NEXT: mov b26, v17.b[12]
+; CHECK-GI-NEXT: sxtb w25, w27
+; CHECK-GI-NEXT: mov v20.h[5], w18
+; CHECK-GI-NEXT: sxtb w18, w3
+; CHECK-GI-NEXT: sxtb w3, w24
+; CHECK-GI-NEXT: mov v23.h[1], w26
+; CHECK-GI-NEXT: mov v21.h[2], w21
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v22.h[3], w7
+; CHECK-GI-NEXT: sxtb w7, w20
+; CHECK-GI-NEXT: mov v19.h[2], w4
+; CHECK-GI-NEXT: sxtb w4, w30
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[3], w15
+; CHECK-GI-NEXT: sxtb w20, w28
+; CHECK-GI-NEXT: sxtb w15, w17
+; CHECK-GI-NEXT: sxtb w17, w22
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v23.h[2], w25
+; CHECK-GI-NEXT: mov v20.h[6], w3
+; CHECK-GI-NEXT: mov v21.h[3], w7
+; CHECK-GI-NEXT: fmov w10, s26
+; CHECK-GI-NEXT: mov v22.h[4], w4
+; CHECK-GI-NEXT: mov v19.h[3], w18
+; CHECK-GI-NEXT: sxtb w18, w23
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b27, v17.b[13]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[3], w20
+; CHECK-GI-NEXT: mov v18.h[4], w13
+; CHECK-GI-NEXT: sxtb w13, w6
+; CHECK-GI-NEXT: mov v20.h[7], w17
+; CHECK-GI-NEXT: mov v21.h[4], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[5], w18
+; CHECK-GI-NEXT: mov b25, v17.b[14]
+; CHECK-GI-NEXT: fmov w26, s27
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: fmov w14, s29
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v23.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w11
+; CHECK-GI-NEXT: sxtb w11, w16
+; CHECK-GI-NEXT: mov v21.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w15, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v18.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v20.8h, v7.8h, v20.8h
+; CHECK-GI-NEXT: mov b7, v17.b[7]
+; CHECK-GI-NEXT: mov v22.h[6], w13
+; CHECK-GI-NEXT: sxtb w13, w5
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v19.h[5], w11
+; CHECK-GI-NEXT: sxtb w11, w2
+; CHECK-GI-NEXT: mov b17, v17.b[15]
+; CHECK-GI-NEXT: mov v18.h[6], w8
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: mov v23.h[5], w15
+; CHECK-GI-NEXT: mov v21.h[6], w10
+; CHECK-GI-NEXT: sxtb w14, w27
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: mov v22.h[7], w13
+; CHECK-GI-NEXT: fmov w12, s30
+; CHECK-GI-NEXT: mov v19.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: smov w10, v20.h[0]
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w13, v20.h[1]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: smov w11, v20.h[4]
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mul v22.8h, v16.8h, v22.8h
+; CHECK-GI-NEXT: smov w14, v20.h[3]
+; CHECK-GI-NEXT: mov v21.h[7], w8
+; CHECK-GI-NEXT: ldrsb w8, [x0, #32]
+; CHECK-GI-NEXT: mov v19.h[7], w12
+; CHECK-GI-NEXT: mov v23.h[7], w9
+; CHECK-GI-NEXT: ldrsb w9, [x1, #32]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: smov w10, v20.h[2]
+; CHECK-GI-NEXT: smov w12, v20.h[5]
+; CHECK-GI-NEXT: fmov s16, w11
+; CHECK-GI-NEXT: mul w9, w9, w8
+; CHECK-GI-NEXT: smov w15, v22.h[4]
+; CHECK-GI-NEXT: smov w17, v22.h[5]
+; CHECK-GI-NEXT: mul v24.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: mov v7.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[0]
+; CHECK-GI-NEXT: mul v18.8h, v19.8h, v23.8h
+; CHECK-GI-NEXT: smov w16, v22.h[1]
+; CHECK-GI-NEXT: smov w8, v20.h[7]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: mov v16.s[1], w12
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: smov w15, v22.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: smov w11, v24.h[0]
+; CHECK-GI-NEXT: mov v7.s[2], w10
+; CHECK-GI-NEXT: smov w10, v20.h[6]
+; CHECK-GI-NEXT: smov w12, v24.h[1]
+; CHECK-GI-NEXT: smov w0, v18.h[4]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mov v19.s[1], w17
+; CHECK-GI-NEXT: smov w17, v18.h[0]
+; CHECK-GI-NEXT: smov w18, v18.h[1]
+; CHECK-GI-NEXT: smov w13, v22.h[2]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: smov w15, v25.b[13]
+; CHECK-GI-NEXT: fmov s20, w11
+; CHECK-GI-NEXT: smov w11, v24.h[4]
+; CHECK-GI-NEXT: mov v7.s[3], w14
+; CHECK-GI-NEXT: smov w14, v24.h[5]
+; CHECK-GI-NEXT: mov v17.s[1], w16
+; CHECK-GI-NEXT: smov w16, v24.h[2]
+; CHECK-GI-NEXT: mov v19.s[2], w15
+; CHECK-GI-NEXT: smov w15, v18.h[5]
+; CHECK-GI-NEXT: fmov s23, w0
+; CHECK-GI-NEXT: mov v20.s[1], w12
+; CHECK-GI-NEXT: mov v16.s[2], w10
+; CHECK-GI-NEXT: smov w10, v22.h[3]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: smov w11, v22.h[7]
+; CHECK-GI-NEXT: fmov s22, w17
+; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: smov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: smov w8, v21.b[11]
-; CHECK-GI-NEXT: smov w10, v21.b[14]
-; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: smov w11, v25.b[2]
-; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: smov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: smov w14, v21.b[15]
-; CHECK-GI-NEXT: smov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: smov w13, v25.b[12]
-; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: smov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: smov w8, v26.b[5]
-; CHECK-GI-NEXT: smov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w15
+; CHECK-GI-NEXT: smov w15, v18.h[6]
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: smov w14, v24.h[6]
+; CHECK-GI-NEXT: mov v20.s[2], w16
+; CHECK-GI-NEXT: mov v22.s[1], w18
+; CHECK-GI-NEXT: smov w16, v18.h[2]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: smov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: smov w9, v25.b[10]
-; CHECK-GI-NEXT: smov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: smov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: smov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: smov w12, v26.b[4]
-; CHECK-GI-NEXT: smov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: smov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: smov w13, v26.b[9]
; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
+; CHECK-GI-NEXT: mov v6.s[2], wzr
; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: smov w15, v25.b[15]
-; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[6]
-; CHECK-GI-NEXT: smov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: smov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: smov w10, v26.b[3]
-; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: smov w15, v22.b[0]
-; CHECK-GI-NEXT: smov w8, v26.b[7]
-; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[3]
+; CHECK-GI-NEXT: smov w15, v18.h[7]
+; CHECK-GI-NEXT: fmov s18, w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[2], w13
+; CHECK-GI-NEXT: smov w12, v24.h[3]
+; CHECK-GI-NEXT: smov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v22.s[2], w16
+; CHECK-GI-NEXT: mov v0.s[2], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: smov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: smov w10, v22.b[1]
-; CHECK-GI-NEXT: smov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: smov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: smov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: smov w15, v22.b[12]
-; CHECK-GI-NEXT: smov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[13]
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: smov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: smov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: smov w15, v26.b[15]
-; CHECK-GI-NEXT: smov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: smov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrsb w14, [x1, #32]
-; CHECK-GI-NEXT: smov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrsb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: smov w9, v22.b[7]
-; CHECK-GI-NEXT: smov w11, v22.b[3]
-; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v17.s[3], w10
+; CHECK-GI-NEXT: mov v19.s[3], w11
+; CHECK-GI-NEXT: mov v20.s[3], w12
+; CHECK-GI-NEXT: mov v21.s[3], w13
+; CHECK-GI-NEXT: mov v22.s[3], w14
+; CHECK-GI-NEXT: mov v23.s[3], w15
+; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: smov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: smov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
+; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: add v4.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v19.4s
+; CHECK-GI-NEXT: add v6.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: add v7.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v18.s[3], wzr
+; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v0.4s, v18.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4845,13 +5844,12 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
;
; CHECK-GI-LABEL: test_sdot_v33i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub sp, sp, #96
-; CHECK-GI-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #80] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
; CHECK-GI-NEXT: .cfi_offset w29, -16
; CHECK-GI-NEXT: .cfi_offset b8, -24
; CHECK-GI-NEXT: .cfi_offset b9, -32
@@ -4861,508 +5859,762 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
; CHECK-GI-NEXT: .cfi_offset b13, -64
; CHECK-GI-NEXT: .cfi_offset b14, -72
; CHECK-GI-NEXT: .cfi_offset b15, -80
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w10, w2
-; CHECK-GI-NEXT: sxtb w11, w4
-; CHECK-GI-NEXT: sxtb w12, w5
-; CHECK-GI-NEXT: sxtb w13, w7
-; CHECK-GI-NEXT: fmov s28, w8
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #80]
+; CHECK-GI-NEXT: lsl w11, w1, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #88]
+; CHECK-GI-NEXT: ldr w13, [sp, #128]
+; CHECK-GI-NEXT: ldr w14, [sp, #136]
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s22, w12
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #152]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w16, w7, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: mov v22.h[1], w8
; CHECK-GI-NEXT: ldr w8, [sp, #96]
-; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: sxtb w11, w6
-; CHECK-GI-NEXT: ldr w14, [sp, #528]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s18, wzr
-; CHECK-GI-NEXT: fmov s20, wzr
-; CHECK-GI-NEXT: mov v28.s[1], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #176]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v23.h[1], w9
; CHECK-GI-NEXT: ldr w9, [sp, #104]
-; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-GI-NEXT: fmov s24, w8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #224]
+; CHECK-GI-NEXT: mov v22.h[2], w10
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w4, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v23.h[2], w8
; CHECK-GI-NEXT: ldr w8, [sp, #112]
-; CHECK-GI-NEXT: mov v25.s[1], w12
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #136]
-; CHECK-GI-NEXT: mov v18.s[1], wzr
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v20.s[1], wzr
; CHECK-GI-NEXT: fmov s19, wzr
-; CHECK-GI-NEXT: mov v28.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w3
-; CHECK-GI-NEXT: mov v24.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #128]
-; CHECK-GI-NEXT: mov v25.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #168]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s21, wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[2], wzr
-; CHECK-GI-NEXT: mov v28.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #160]
-; CHECK-GI-NEXT: mov v24.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #120]
-; CHECK-GI-NEXT: fmov s30, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #144]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #200]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[1], wzr
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: mov v30.s[1], w12
-; CHECK-GI-NEXT: ldr w10, [sp, #176]
-; CHECK-GI-NEXT: mov v24.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #224]
-; CHECK-GI-NEXT: ldr w12, [sp, #152]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v22.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #192]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #232]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s23, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #240]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], wzr
-; CHECK-GI-NEXT: mov v20.s[3], wzr
-; CHECK-GI-NEXT: mov v22.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #184]
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: mov v23.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #256]
-; CHECK-GI-NEXT: ldr w11, [sp, #208]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v30.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #264]
-; CHECK-GI-NEXT: mov v26.s[1], w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v22.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #296]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w10
-; CHECK-GI-NEXT: mov v23.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #248]
-; CHECK-GI-NEXT: mov v26.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #304]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: fmov s31, w9
-; CHECK-GI-NEXT: mov v29.s[1], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #312]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s16, wzr
-; CHECK-GI-NEXT: mov v31.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #328]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w11
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #336]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #368]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: fmov s0, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #320]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s12, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v29.s[3], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #376]
-; CHECK-GI-NEXT: mov v0.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #344]
-; CHECK-GI-NEXT: ldr w8, [sp, #288]
-; CHECK-GI-NEXT: mov v12.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #392]
-; CHECK-GI-NEXT: mov v31.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #424]
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #432]
-; CHECK-GI-NEXT: fmov s13, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v12.s[2], w9
-; CHECK-GI-NEXT: fmov s8, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w10, [sp, #440]
-; CHECK-GI-NEXT: ldr w11, [sp, #384]
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
+; CHECK-GI-NEXT: fmov s18, wzr
; CHECK-GI-NEXT: fmov s17, wzr
-; CHECK-GI-NEXT: mov v13.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #408]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w12, [sp, #456]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: mov v12.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #488]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v13.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #496]
-; CHECK-GI-NEXT: mov v0.s[3], w9
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #416]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w9, [sp, #464]
-; CHECK-GI-NEXT: fmov s14, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #504]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #448]
-; CHECK-GI-NEXT: mul v27.4s, v25.4s, v0.4s
-; CHECK-GI-NEXT: mov v13.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #560]
-; CHECK-GI-NEXT: sxtb w15, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #568]
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #520]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #472]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s10, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #552]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s15, w13
-; CHECK-GI-NEXT: mov v8.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #480]
-; CHECK-GI-NEXT: mov v10.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #576]
-; CHECK-GI-NEXT: mov v9.s[2], w15
-; CHECK-GI-NEXT: mul w8, w8, w10
-; CHECK-GI-NEXT: mov v15.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #512]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w13, [sp, #536]
-; CHECK-GI-NEXT: mla v27.4s, v28.4s, v31.4s
-; CHECK-GI-NEXT: mul v30.4s, v30.4s, v13.4s
-; CHECK-GI-NEXT: mov v10.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #592]
-; CHECK-GI-NEXT: fmov s25, w8
-; CHECK-GI-NEXT: mov v14.s[3], w9
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #600]
-; CHECK-GI-NEXT: mla v30.4s, v24.4s, v12.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v10.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #632]
-; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #656]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s28, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #688]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v15.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #544]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v0.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #664]
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #696]
-; CHECK-GI-NEXT: fmov s11, w8
-; CHECK-GI-NEXT: fmov s31, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #672]
-; CHECK-GI-NEXT: ldr w8, [sp, #616]
-; CHECK-GI-NEXT: mov v11.s[1], w9
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #608]
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #704]
-; CHECK-GI-NEXT: ldr w9, [sp, #640]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v24.4s, v26.4s, v14.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #712]
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: mov v31.s[2], w10
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: mov v28.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w10, [sp, #720]
-; CHECK-GI-NEXT: ldr w9, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v26.4s, v29.4s, v15.4s
-; CHECK-GI-NEXT: mla v24.4s, v22.4s, v8.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v0.s[3], w8
-; CHECK-GI-NEXT: mov v31.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #784]
-; CHECK-GI-NEXT: mov v28.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: fmov s13, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #792]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v11.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #760]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w8, [sp, #728]
-; CHECK-GI-NEXT: fmov s14, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s12, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #824]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mla v26.4s, v23.4s, v9.4s
-; CHECK-GI-NEXT: ldr w13, [sp, #984]
-; CHECK-GI-NEXT: mov v14.s[1], w10
-; CHECK-GI-NEXT: sxtb w10, w12
-; CHECK-GI-NEXT: mov v13.s[1], w8
-; CHECK-GI-NEXT: mov v12.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #832]
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: ldr w12, [sp, #768]
-; CHECK-GI-NEXT: ldr w10, [sp, #800]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[3], w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: fmov s20, wzr
; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: lsl w11, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: fmov s7, wzr
; CHECK-GI-NEXT: fmov s2, wzr
-; CHECK-GI-NEXT: mov v29.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #840]
-; CHECK-GI-NEXT: mov v13.s[2], w8
-; CHECK-GI-NEXT: mov v12.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #808]
-; CHECK-GI-NEXT: mov v14.s[2], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w8, [sp, #776]
-; CHECK-GI-NEXT: ldr w10, [sp, #848]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s5, wzr
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v23.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #160]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #168]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v29.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #856]
-; CHECK-GI-NEXT: mov v13.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #864]
-; CHECK-GI-NEXT: mov v14.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #888]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s7, wzr
-; CHECK-GI-NEXT: fmov s15, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #920]
-; CHECK-GI-NEXT: mov v12.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #872]
-; CHECK-GI-NEXT: mov v29.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #896]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s22, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #928]
-; CHECK-GI-NEXT: mov v15.s[1], w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #952]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w11, [sp, #904]
-; CHECK-GI-NEXT: mov v22.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #936]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mov v15.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #960]
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #880]
-; CHECK-GI-NEXT: fmov s23, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w9, [sp, #944]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v22.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #912]
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #968]
-; CHECK-GI-NEXT: mov v23.s[1], w8
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: add v18.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: mov v22.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #992]
-; CHECK-GI-NEXT: fmov s9, w12
-; CHECK-GI-NEXT: mov v23.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #1048]
-; CHECK-GI-NEXT: ldr w12, [sp, #1056]
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v15.4s
-; CHECK-GI-NEXT: sxtb w13, w11
-; CHECK-GI-NEXT: mov v8.s[3], w9
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: ldr w9, [sp, #1000]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w10, [sp, #1016]
-; CHECK-GI-NEXT: ldr w8, [sp, #816]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s3, wzr
+; CHECK-GI-NEXT: mov v24.h[1], w12
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: mov v22.h[5], w11
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[5], w9
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w9, [sp, #192]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: mov v24.h[2], w8
+; CHECK-GI-NEXT: mov v22.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #208]
+; CHECK-GI-NEXT: mov v23.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #216]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #200]
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: mov v24.h[3], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[7], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #288]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v23.h[7], w10
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s27, w12
+; CHECK-GI-NEXT: ldr w10, [sp, #232]
+; CHECK-GI-NEXT: sbfx w16, w14, #8, #8
+; CHECK-GI-NEXT: mov v24.h[4], w15
+; CHECK-GI-NEXT: lsl w15, w11, #8
+; CHECK-GI-NEXT: sbfx w14, w18, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #296]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #344]
+; CHECK-GI-NEXT: mov v27.h[1], w13
+; CHECK-GI-NEXT: lsl w13, w17, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #240]
+; CHECK-GI-NEXT: sbfx w17, w10, #8, #8
+; CHECK-GI-NEXT: mov v25.h[1], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #352]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.h[5], w15
+; CHECK-GI-NEXT: mov v27.h[2], w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #304]
+; CHECK-GI-NEXT: fmov s26, w16
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #248]
+; CHECK-GI-NEXT: mov v25.h[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v24.h[6], w9
+; CHECK-GI-NEXT: lsl w16, w11, #8
+; CHECK-GI-NEXT: mov v26.h[1], w13
+; CHECK-GI-NEXT: mov v27.h[3], w17
+; CHECK-GI-NEXT: sbfx w13, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #312]
+; CHECK-GI-NEXT: ldr w17, [sp, #328]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #256]
+; CHECK-GI-NEXT: ldr w11, [sp, #264]
+; CHECK-GI-NEXT: mov v25.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #368]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v26.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #320]
+; CHECK-GI-NEXT: mov v27.h[4], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #408]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #376]
+; CHECK-GI-NEXT: mov v25.h[4], w9
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: mov v26.h[3], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #416]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w17, #8
+; CHECK-GI-NEXT: mov v27.h[5], w9
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v25.h[5], w14
+; CHECK-GI-NEXT: fmov s29, w15
+; CHECK-GI-NEXT: ldr w14, [sp, #384]
+; CHECK-GI-NEXT: ldr w15, [sp, #472]
+; CHECK-GI-NEXT: mov v26.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #424]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v29.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #480]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v25.h[6], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #432]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v26.h[5], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #392]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v29.h[2], w13
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #336]
+; CHECK-GI-NEXT: ldr w13, [sp, #488]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #440]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v28.h[1], w12
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v29.h[3], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #496]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #400]
+; CHECK-GI-NEXT: mov v26.h[6], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #448]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v28.h[2], w13
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v25.h[7], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #456]
+; CHECK-GI-NEXT: ldr w15, [sp, #504]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v28.h[3], w16
+; CHECK-GI-NEXT: ldr w15, [sp, #512]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mul v30.8h, v22.8h, v25.8h
+; CHECK-GI-NEXT: mov v26.h[7], w9
+; CHECK-GI-NEXT: mov v29.h[5], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #464]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[4], w9
+; CHECK-GI-NEXT: mov v27.h[6], w10
+; CHECK-GI-NEXT: ldr w16, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v30.h[0]
+; CHECK-GI-NEXT: mov v24.h[7], w8
+; CHECK-GI-NEXT: lsl w8, w17, #8
+; CHECK-GI-NEXT: mov v29.h[6], w11
+; CHECK-GI-NEXT: mul v26.8h, v23.8h, v26.8h
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: smov w16, v30.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #528]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[5], w10
+; CHECK-GI-NEXT: mov v27.h[7], w14
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: sbfx w10, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v30.h[4]
+; CHECK-GI-NEXT: mov v29.h[7], w8
+; CHECK-GI-NEXT: smov w15, v26.h[0]
+; CHECK-GI-NEXT: smov w13, v30.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #544]
+; CHECK-GI-NEXT: ldr w11, [sp, #552]
+; CHECK-GI-NEXT: mov v22.s[1], w16
+; CHECK-GI-NEXT: smov w16, v26.h[4]
+; CHECK-GI-NEXT: mov v28.h[6], w10
+; CHECK-GI-NEXT: smov w10, v26.h[1]
+; CHECK-GI-NEXT: fmov s23, w14
+; CHECK-GI-NEXT: smov w14, v26.h[5]
+; CHECK-GI-NEXT: mul v29.8h, v24.8h, v29.8h
+; CHECK-GI-NEXT: fmov s24, w15
+; CHECK-GI-NEXT: smov w15, v26.h[2]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: smov w8, v30.h[5]
+; CHECK-GI-NEXT: smov w17, v30.h[7]
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: mov v22.s[2], w13
+; CHECK-GI-NEXT: smov w13, v30.h[3]
+; CHECK-GI-NEXT: mov v24.s[1], w10
+; CHECK-GI-NEXT: smov w16, v26.h[6]
+; CHECK-GI-NEXT: sbfx w10, w12, #8, #8
+; CHECK-GI-NEXT: smov w18, v29.h[0]
+; CHECK-GI-NEXT: smov w0, v29.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v25.s[1], w14
+; CHECK-GI-NEXT: smov w14, v26.h[7]
+; CHECK-GI-NEXT: mov v28.h[7], w10
+; CHECK-GI-NEXT: mov v22.s[3], w13
+; CHECK-GI-NEXT: smov w13, v26.h[3]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: fmov s26, w18
+; CHECK-GI-NEXT: mov v23.s[1], w8
+; CHECK-GI-NEXT: smov w8, v30.h[6]
+; CHECK-GI-NEXT: mov v25.s[2], w16
+; CHECK-GI-NEXT: lsl w16, w9, #8
+; CHECK-GI-NEXT: mul v31.8h, v27.8h, v28.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #584]
+; CHECK-GI-NEXT: mov v24.s[3], w13
+; CHECK-GI-NEXT: smov w13, v29.h[4]
+; CHECK-GI-NEXT: mov v26.s[1], w0
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.s[2], w8
+; CHECK-GI-NEXT: mov v25.s[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #608]
+; CHECK-GI-NEXT: ldr w8, [sp, #576]
+; CHECK-GI-NEXT: fmov s8, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #616]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s27, w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v26.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[5]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #624]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v8.h[1], w11
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v23.s[3], w17
+; CHECK-GI-NEXT: fmov s9, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: smov w17, v31.h[1]
+; CHECK-GI-NEXT: mov v27.s[1], w15
+; CHECK-GI-NEXT: smov w15, v31.h[0]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v8.h[2], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v9.h[1], w16
+; CHECK-GI-NEXT: smov w16, v31.h[2]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #592]
+; CHECK-GI-NEXT: ldr w12, [sp, #600]
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: smov w15, v29.h[6]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v8.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #640]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v9.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #672]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v28.s[1], w17
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v27.s[2], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #680]
+; CHECK-GI-NEXT: mov v8.h[4], w8
+; CHECK-GI-NEXT: smov w8, v31.h[4]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v9.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #688]
+; CHECK-GI-NEXT: mov v28.s[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #648]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #656]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s30, w8
+; CHECK-GI-NEXT: sbfx w8, w10, #8, #8
+; CHECK-GI-NEXT: smov w10, v31.h[5]
+; CHECK-GI-NEXT: mov v8.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #696]
+; CHECK-GI-NEXT: mov v10.h[1], w15
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v9.h[4], w8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #704]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: ldr w17, [sp, #768]
+; CHECK-GI-NEXT: mov v30.s[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #744]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: mov v10.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v9.h[5], w16
+; CHECK-GI-NEXT: mov v8.h[6], w11
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #720]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v10.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #752]
+; CHECK-GI-NEXT: mov v8.h[7], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #760]
+; CHECK-GI-NEXT: ldr w8, [sp, #784]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mla v0.4s, v10.4s, v29.4s
-; CHECK-GI-NEXT: fmov s10, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #1024]
-; CHECK-GI-NEXT: mul v20.4s, v11.4s, v8.4s
-; CHECK-GI-NEXT: ldr q8, [sp] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v9.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #1008]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: mov v10.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1064]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v10.h[4], w12
+; CHECK-GI-NEXT: sbfx w12, w15, #8, #8
+; CHECK-GI-NEXT: mov v11.h[1], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #776]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v9.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w11, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #808]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v11.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #816]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], wzr
-; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v9.h[7], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #824]
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: mov v10.h[5], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #832]
+; CHECK-GI-NEXT: mov v11.h[3], w13
+; CHECK-GI-NEXT: sbfx w15, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w17, #8
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #872]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #840]
+; CHECK-GI-NEXT: sbfx w13, w18, #8, #8
+; CHECK-GI-NEXT: sbfx w17, w12, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #856]
+; CHECK-GI-NEXT: mov v12.h[1], w15
+; CHECK-GI-NEXT: mov v11.h[4], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #880]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v10.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #848]
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: sbfx w15, w16, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #888]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.s[1], wzr
+; CHECK-GI-NEXT: mov v12.h[2], w17
+; CHECK-GI-NEXT: lsl w17, w10, #8
+; CHECK-GI-NEXT: mov v11.h[5], w9
+; CHECK-GI-NEXT: fmov s13, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #936]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #864]
+; CHECK-GI-NEXT: mov v12.h[3], w15
+; CHECK-GI-NEXT: mov v11.h[6], w8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #1000]
+; CHECK-GI-NEXT: mov v13.h[1], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #944]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #896]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v12.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1008]
+; CHECK-GI-NEXT: fmov s14, w9
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v13.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #952]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w17, w8, #8
+; CHECK-GI-NEXT: smov w8, v29.h[3]
+; CHECK-GI-NEXT: smov w9, v29.h[7]
+; CHECK-GI-NEXT: fmov s29, w11
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v14.h[1], w15
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #904]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v12.h[5], w13
+; CHECK-GI-NEXT: mov v13.h[3], w14
+; CHECK-GI-NEXT: mov v29.h[1], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #960]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #1016]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #1024]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v14.h[2], w16
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #912]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #968]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #976]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v14.h[3], w15
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v29.h[2], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #1032]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #920]
+; CHECK-GI-NEXT: mov v26.s[3], w8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v14.h[4], w11
+; CHECK-GI-NEXT: mov v29.h[3], w13
+; CHECK-GI-NEXT: ldr w11, [sp, #984]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v13.h[5], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #1040]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #928]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[7], w10
+; CHECK-GI-NEXT: mov v27.s[3], w9
+; CHECK-GI-NEXT: mov v14.h[5], w12
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[6], w14
+; CHECK-GI-NEXT: ldr w12, [sp, #1048]
+; CHECK-GI-NEXT: sbfx w14, w16, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #728]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mul v15.8h, v8.8h, v12.8h
+; CHECK-GI-NEXT: smov w16, v31.h[6]
+; CHECK-GI-NEXT: mov v14.h[6], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #992]
+; CHECK-GI-NEXT: mov v29.h[5], w14
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v13.h[7], w13
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #792]
+; CHECK-GI-NEXT: ldr w14, [sp, #1056]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v30.s[2], w16
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: smov w8, v15.h[1]
+; CHECK-GI-NEXT: smov w9, v15.h[5]
+; CHECK-GI-NEXT: mov v29.h[6], w12
+; CHECK-GI-NEXT: lsl w12, w13, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v10.h[7], w11
+; CHECK-GI-NEXT: mov v14.h[7], w10
+; CHECK-GI-NEXT: mul v12.8h, v9.8h, v13.8h
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w10, v15.h[0]
+; CHECK-GI-NEXT: smov w11, v15.h[4]
+; CHECK-GI-NEXT: smov w14, v31.h[7]
+; CHECK-GI-NEXT: smov w15, v31.h[3]
+; CHECK-GI-NEXT: mov v11.h[7], w12
+; CHECK-GI-NEXT: mov v29.h[7], w13
; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mul v13.8h, v10.8h, v14.8h
+; CHECK-GI-NEXT: smov w12, v12.h[0]
+; CHECK-GI-NEXT: smov w13, v12.h[1]
+; CHECK-GI-NEXT: mov v7.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1080]
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w10, [sp, #1032]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: mov v29.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #1072]
-; CHECK-GI-NEXT: mov v19.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: fmov s31, w11
+; CHECK-GI-NEXT: mov v30.s[3], w14
+; CHECK-GI-NEXT: smov w11, v12.h[4]
+; CHECK-GI-NEXT: mul v14.8h, v11.8h, v29.8h
+; CHECK-GI-NEXT: fmov s29, w10
+; CHECK-GI-NEXT: smov w10, v15.h[2]
+; CHECK-GI-NEXT: smov w14, v13.h[0]
+; CHECK-GI-NEXT: fmov s8, w12
+; CHECK-GI-NEXT: smov w16, v13.h[1]
+; CHECK-GI-NEXT: mov v31.s[1], w9
+; CHECK-GI-NEXT: smov w9, v12.h[2]
+; CHECK-GI-NEXT: mov v28.s[3], w15
+; CHECK-GI-NEXT: mov v29.s[1], w8
+; CHECK-GI-NEXT: smov w8, v15.h[6]
+; CHECK-GI-NEXT: smov w15, v12.h[5]
+; CHECK-GI-NEXT: mov v8.s[1], w13
+; CHECK-GI-NEXT: fmov s9, w11
+; CHECK-GI-NEXT: smov w12, v15.h[3]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: smov w14, v13.h[2]
+; CHECK-GI-NEXT: smov w11, v12.h[6]
+; CHECK-GI-NEXT: smov w13, v15.h[7]
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v31.s[2], w8
+; CHECK-GI-NEXT: smov w8, v13.h[4]
+; CHECK-GI-NEXT: mov v29.s[2], w10
+; CHECK-GI-NEXT: mov v10.s[1], w16
+; CHECK-GI-NEXT: smov w16, v14.h[0]
+; CHECK-GI-NEXT: mov v8.s[2], w9
+; CHECK-GI-NEXT: smov w9, v13.h[5]
+; CHECK-GI-NEXT: smov w10, v12.h[3]
+; CHECK-GI-NEXT: mov v9.s[1], w15
+; CHECK-GI-NEXT: smov w15, v13.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mul w8, w8, w9
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: fmov s11, w8
+; CHECK-GI-NEXT: smov w8, v14.h[1]
+; CHECK-GI-NEXT: mov v29.s[3], w12
+; CHECK-GI-NEXT: mov v10.s[2], w14
+; CHECK-GI-NEXT: smov w14, v12.h[7]
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: smov w12, v14.h[4]
+; CHECK-GI-NEXT: mov v8.s[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #536]
+; CHECK-GI-NEXT: mov v11.s[1], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v9.s[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #800]
+; CHECK-GI-NEXT: mov v12.s[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1064]
+; CHECK-GI-NEXT: mov v31.s[3], w13
+; CHECK-GI-NEXT: smov w13, v14.h[5]
+; CHECK-GI-NEXT: sxtb w9, w9
; CHECK-GI-NEXT: sxtb w10, w10
; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v11.s[2], w15
+; CHECK-GI-NEXT: smov w15, v13.h[3]
+; CHECK-GI-NEXT: smov w16, v13.h[7]
+; CHECK-GI-NEXT: fmov s13, w12
+; CHECK-GI-NEXT: mul w9, w9, w10
+; CHECK-GI-NEXT: smov w12, v14.h[2]
+; CHECK-GI-NEXT: mul w8, w11, w8
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: mov v21.s[2], wzr
; CHECK-GI-NEXT: mov v16.s[2], wzr
+; CHECK-GI-NEXT: mov v18.s[2], wzr
; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v13.s[1], w13
+; CHECK-GI-NEXT: smov w13, v14.h[6]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: sxth w10, w8
+; CHECK-GI-NEXT: mov v20.s[2], wzr
; CHECK-GI-NEXT: mov v6.s[2], wzr
+; CHECK-GI-NEXT: mov v7.s[2], wzr
; CHECK-GI-NEXT: mov v2.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], wzr
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: mov v10.s[3], w11
-; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
-; CHECK-GI-NEXT: ldr w9, [sp, #976]
-; CHECK-GI-NEXT: fmov s21, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #1040]
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: add v22.4s, v22.4s, v23.4s
+; CHECK-GI-NEXT: add v25.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: fmov s23, w9
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v12.s[2], w12
+; CHECK-GI-NEXT: mov v13.s[2], w13
+; CHECK-GI-NEXT: smov w8, v14.h[3]
+; CHECK-GI-NEXT: smov w9, v14.h[7]
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v0.s[2], wzr
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[3], wzr
; CHECK-GI-NEXT: mov v16.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[3], wzr
; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.16b, v8.16b
-; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v20.s[3], wzr
; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v7.s[3], wzr
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], wzr
-; CHECK-GI-NEXT: mov v25.s[1], wzr
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mul v8.4s, v13.4s, v9.4s
-; CHECK-GI-NEXT: mul v9.4s, v14.4s, v10.4s
-; CHECK-GI-NEXT: mov v23.s[3], w9
-; CHECK-GI-NEXT: mov v29.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: mov v23.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], wzr
+; CHECK-GI-NEXT: mov v9.s[3], w14
+; CHECK-GI-NEXT: mov v10.s[3], w15
+; CHECK-GI-NEXT: mov v11.s[3], w16
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: add v16.4s, v16.4s, v17.4s
-; CHECK-GI-NEXT: add v3.4s, v3.4s, v6.4s
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
-; CHECK-GI-NEXT: add v4.4s, v4.4s, v7.4s
-; CHECK-GI-NEXT: mov v25.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mla v20.4s, v28.4s, v22.4s
-; CHECK-GI-NEXT: mla v8.4s, v31.4s, v23.4s
-; CHECK-GI-NEXT: mla v9.4s, v12.4s, v29.4s
-; CHECK-GI-NEXT: add v5.4s, v19.4s, v16.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v18.4s
-; CHECK-GI-NEXT: add v3.4s, v11.4s, v3.4s
+; CHECK-GI-NEXT: mov v12.s[3], w8
+; CHECK-GI-NEXT: mov v13.s[3], w9
+; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
+; CHECK-GI-NEXT: add v16.4s, v16.4s, v18.4s
+; CHECK-GI-NEXT: add v17.4s, v17.4s, v20.4s
+; CHECK-GI-NEXT: add v6.4s, v6.4s, v7.4s
; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v4.4s, v27.4s, v30.4s
-; CHECK-GI-NEXT: add v6.4s, v24.4s, v26.4s
-; CHECK-GI-NEXT: ldr x29, [sp, #80] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v25.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v20.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v5.4s
-; CHECK-GI-NEXT: add v5.4s, v8.4s, v9.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v2.4s
-; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v25.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: add v2.4s, v21.4s, v2.4s
-; CHECK-GI-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v3.4s, v5.4s
+; CHECK-GI-NEXT: mov v23.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], wzr
+; CHECK-GI-NEXT: add v26.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v27.4s, v28.4s, v30.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v19.4s
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
+; CHECK-GI-NEXT: add v5.4s, v29.4s, v31.4s
+; CHECK-GI-NEXT: add v7.4s, v8.4s, v9.4s
+; CHECK-GI-NEXT: add v16.4s, v10.4s, v11.4s
+; CHECK-GI-NEXT: add v17.4s, v12.4s, v13.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v6.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v23.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], wzr
+; CHECK-GI-NEXT: add v3.4s, v22.4s, v25.4s
+; CHECK-GI-NEXT: add v6.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-GI-NEXT: add v4.4s, v5.4s, v7.4s
+; CHECK-GI-NEXT: add v5.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v2.4s, v3.4s, v6.4s
+; CHECK-GI-NEXT: add v1.4s, v23.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v0.4s, v24.4s, v0.4s
+; CHECK-GI-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: add sp, sp, #96
+; CHECK-GI-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <33 x i8> %a to <33 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-extmul.ll b/llvm/test/CodeGen/AArch64/neon-extmul.ll
index c82f8e1..84b634d 100644
--- a/llvm/test/CodeGen/AArch64/neon-extmul.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extmul.ll
@@ -12,10 +12,9 @@ define <8 x i32> @extmuls_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: smull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -34,10 +33,9 @@ define <8 x i32> @extmulu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -79,12 +77,9 @@ define <8 x i32> @extmuladds_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: smlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: saddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: saddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -104,12 +99,9 @@ define <8 x i32> @extmuladdu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -163,16 +155,13 @@ define <8 x i64> @extmuls_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: smull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: smull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: smull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: smull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: sshll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: sshll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: sshll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: sshll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -195,16 +184,13 @@ define <8 x i64> @extmulu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: umull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
@@ -263,20 +249,13 @@ define <8 x i64> @extmuladds_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: smlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: smlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: smlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: smlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: saddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: saddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: saddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: saddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -301,20 +280,13 @@ define <8 x i64> @extmuladdu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index 17ad298..3caac1d 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -1,40 +1,72 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
%struct.anon = type { ptr, ptr }
@ptr_wrapper = common global ptr null, align 8
define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
-; CHECK-LABEL: test_func_i32_two_uses:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB0_3
-; CHECK-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: lsl w1, w1, #1
-; CHECK-NEXT: cbz w1, .LBB0_6
-; CHECK-NEXT: .LBB0_3: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands w10, w1, w0
-; CHECK-NEXT: and w11, w2, w0
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cmp w10, w11
-; CHECK-NEXT: b.eq .LBB0_1
-; CHECK-NEXT: // %bb.4: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbnz w2, .LBB0_1
-; CHECK-NEXT: // %bb.5: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbz w10, .LBB0_2
-; CHECK-NEXT: b .LBB0_1
-; CHECK-NEXT: .LBB0_6: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i32_two_uses:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB0_3
+; CHECK-SD-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: lsl w1, w1, #1
+; CHECK-SD-NEXT: cbz w1, .LBB0_6
+; CHECK-SD-NEXT: .LBB0_3: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands w10, w1, w0
+; CHECK-SD-NEXT: and w11, w2, w0
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cmp w10, w11
+; CHECK-SD-NEXT: b.eq .LBB0_1
+; CHECK-SD-NEXT: // %bb.4: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbnz w2, .LBB0_1
+; CHECK-SD-NEXT: // %bb.5: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbz w10, .LBB0_2
+; CHECK-SD-NEXT: b .LBB0_1
+; CHECK-SD-NEXT: .LBB0_6: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i32_two_uses:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB0_3
+; CHECK-GI-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: lsl w1, w1, #1
+; CHECK-GI-NEXT: cbz w1, .LBB0_6
+; CHECK-GI-NEXT: .LBB0_3: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and w10, w1, w0
+; CHECK-GI-NEXT: tst w1, w0
+; CHECK-GI-NEXT: and w11, w2, w0
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cmp w10, w11
+; CHECK-GI-NEXT: b.eq .LBB0_1
+; CHECK-GI-NEXT: // %bb.4: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbnz w2, .LBB0_1
+; CHECK-GI-NEXT: // %bb.5: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbz w10, .LBB0_2
+; CHECK-GI-NEXT: b .LBB0_1
+; CHECK-GI-NEXT: .LBB0_6: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -70,28 +102,52 @@ do.end: ; preds = %4
}
define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) {
-; CHECK-LABEL: test_func_i64_one_use:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: lsl x1, x1, #1
-; CHECK-NEXT: cbz x1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands x10, x1, x0
-; CHECK-NEXT: orr x10, x2, x10
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cbz x10, .LBB1_1
-; CHECK-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: b .LBB1_1
-; CHECK-NEXT: .LBB1_4: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i64_one_use:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB1_2
+; CHECK-SD-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: lsl x1, x1, #1
+; CHECK-SD-NEXT: cbz x1, .LBB1_4
+; CHECK-SD-NEXT: .LBB1_2: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands x10, x1, x0
+; CHECK-SD-NEXT: orr x10, x2, x10
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cbz x10, .LBB1_1
+; CHECK-SD-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: b .LBB1_1
+; CHECK-SD-NEXT: .LBB1_4: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i64_one_use:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB1_2
+; CHECK-GI-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: lsl x1, x1, #1
+; CHECK-GI-NEXT: cbz x1, .LBB1_4
+; CHECK-GI-NEXT: .LBB1_2: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and x10, x1, x0
+; CHECK-GI-NEXT: tst x1, x0
+; CHECK-GI-NEXT: orr x10, x2, x10
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cbz x10, .LBB1_1
+; CHECK-GI-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: b .LBB1_1
+; CHECK-GI-NEXT: .LBB1_4: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -124,11 +180,18 @@ do.end: ; preds = %4
}
define i64 @test_and1(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ands x8, x0, #0x3
-; CHECK-NEXT: csel x0, x8, x1, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ands x8, x0, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and x8, x0, #0x3
+; CHECK-GI-NEXT: tst x0, #0x3
+; CHECK-GI-NEXT: csel x0, x8, x1, eq
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%c = icmp eq i64 %a, 0
%s = select i1 %c, i64 %a, i64 %y
@@ -148,23 +211,43 @@ define i64 @test_and2(i64 %x, i64 %y) {
}
define i64 @test_and3(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and3:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: mov x0, xzr
-; CHECK-NEXT: mov x19, x1
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x20, #0x3
-; CHECK-NEXT: csel x0, x8, x19, eq
-; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: mov x20, x0
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: mov x19, x1
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x20, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x19, eq
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: and x21, x0, #0x3
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: mov x20, x1
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x21, x20, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 0)
%c = icmp eq i64 %a, 0
@@ -173,19 +256,37 @@ define i64 @test_and3(i64 %x, i64 %y) {
}
define i64 @test_and_4(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and_4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: ands x0, x0, #0x3
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x19, #0x3
-; CHECK-NEXT: csel x0, x8, x0, eq
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and_4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: mov x19, x0
+; CHECK-SD-NEXT: ands x0, x0, #0x3
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x19, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x0, eq
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and_4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: and x20, x0, #0x3
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x0, x20
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x20, x0, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 %a)
%c = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 52cb2d9..c7fb2db 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -267,7 +267,7 @@ define <vscale x 32 x i16> @interleave4_nxv8i16(<vscale x 8 x i16> %vec0, <vscal
; SME2-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; SME2-NEXT: zip { z0.h - z3.h }, { z0.h - z3.h }
; SME2-NEXT: ret
- %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
+ %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv32i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
ret <vscale x 32 x i16> %retval
}
@@ -540,30 +540,81 @@ define <vscale x 4 x i32> @interleave2_nxv2i32(<vscale x 2 x i32> %vec0, <vscale
ret <vscale x 4 x i32> %retval
}
-; Float declarations
-declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; SVE-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #4 // =0x4
+; SVE-NEXT: mov z1.d, #3 // =0x3
+; SVE-NEXT: zip2 z2.d, z1.d, z0.d
+; SVE-NEXT: zip1 z0.d, z1.d, z0.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: mov z0.d, #4 // =0x4
+; SME2-NEXT: mov z1.d, #3 // =0x3
+; SME2-NEXT: zip { z0.d, z1.d }, z1.d, z0.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
-; Integer declarations
-declare <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, w0
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
-; Predicated
-declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
-declare <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
-declare <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
-declare <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-
-; Illegal type size
-declare <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-
-declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; SVE-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: // kill: def $w1 killed $w1 def $x1
+; SVE-NEXT: // kill: def $w0 killed $w0 def $x0
+; SVE-NEXT: mov z0.d, x0
+; SVE-NEXT: mov z1.d, x1
+; SVE-NEXT: zip2 z2.d, z0.d, z1.d
+; SVE-NEXT: zip1 z0.d, z0.d, z1.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: // kill: def $w1 killed $w1 def $x1
+; SME2-NEXT: // kill: def $w0 killed $w0 def $x0
+; SME2-NEXT: mov z0.d, x0
+; SME2-NEXT: mov z1.d, x1
+; SME2-NEXT: zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
index 9306c20..7dcd56c 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
@@ -1,14 +1,14 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s |FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve < %s | FileCheck %s
-declare i32 @llvm.vscale.i32()
-declare i64 @llvm.vscale.i64()
+target triple = "aarch64-unknown-linux-gnu"
; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
define i64 @combine_add_vscale_i64() nounwind {
; CHECK-LABEL: combine_add_vscale_i64:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%add = add i64 %vscale, %vscale
ret i64 %add
@@ -16,9 +16,10 @@ define i64 @combine_add_vscale_i64() nounwind {
define i32 @combine_add_vscale_i32() nounwind {
; CHECK-LABEL: combine_add_vscale_i32:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%add = add i32 %vscale, %vscale
ret i32 %add
@@ -28,9 +29,9 @@ define i32 @combine_add_vscale_i32() nounwind {
; In this test, C0 = 1, C1 = 32.
define i64 @combine_mul_vscale_i64() nounwind {
; CHECK-LABEL: combine_mul_vscale_i64:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #2
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #2
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mul = mul i64 %vscale, 32
ret i64 %mul
@@ -38,9 +39,10 @@ define i64 @combine_mul_vscale_i64() nounwind {
define i32 @combine_mul_vscale_i32() nounwind {
; CHECK-LABEL: combine_mul_vscale_i32:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #3
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #3
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%mul = mul i32 %vscale, 48
ret i32 %mul
@@ -49,11 +51,11 @@ define i32 @combine_mul_vscale_i32() nounwind {
; Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i64:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add x0, x0, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add x0, x0, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %in, %vscale
ret i64 %sub
@@ -61,11 +63,11 @@ define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i32:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add w0, w0, w8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add w0, w0, w8
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%sub = sub i32 %in, %vscale
ret i32 %sub
@@ -75,12 +77,13 @@ define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: multiple_uses_sub_vscale_i64:
-; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: lsr x8, x8, #4
-; CHECK-NEXT: sub x9, x0, x8
-; CHECK-NEXT: add x8, x1, x8
-; CHECK-NEXT: mul x0, x9, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: sub x9, x0, x8
+; CHECK-NEXT: add x8, x1, x8
+; CHECK-NEXT: mul x0, x9, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %x, %vscale
%add = add i64 %y, %vscale
@@ -95,9 +98,9 @@ define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; Hence, the immediate for RDVL is #1.
define i64 @combine_shl_vscale_i64() nounwind {
; CHECK-LABEL: combine_shl_vscale_i64:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%shl = shl i64 %vscale, 4
ret i64 %shl
@@ -105,10 +108,38 @@ define i64 @combine_shl_vscale_i64() nounwind {
define i32 @combine_shl_vscale_i32() nounwind {
; CHECK-LABEL: combine_shl_vscale_i32:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%shl = shl i32 %vscale, 4
ret i32 %shl
}
+
+define i64 @combine_shl_mul_vscale(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %a, %vscale
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+define i64 @combine_shl_mul_vscale_commuted(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale_commuted:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %vscale, %a
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+declare i32 @llvm.vscale.i32()
+declare i64 @llvm.vscale.i64()
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 223698b..5fc996a 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -64,7 +64,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcsel s2, s0, s3, mi
; CHECK-NEXT: subs w10, w10, #1
; CHECK-NEXT: fcvtzs s2, s2
-; CHECK-NEXT: st1 { v2.b }[0], [x9], #1
+; CHECK-NEXT: fmov w11, s2
+; CHECK-NEXT: strb w11, [x9], #1
; CHECK-NEXT: b.ne .LBB0_7
; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 290a473..74d1165 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1907,11 +1907,8 @@ define i32 @test_udot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -1952,17 +1949,13 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: umull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v16i8:
@@ -2018,36 +2011,21 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: umull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: uaddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
@@ -2118,61 +2096,33 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: ushll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: ushll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: umull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: umull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: umull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: umull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: umlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: umlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: umlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: umlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: umull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: umull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: umull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: uaddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: uaddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: uaddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
@@ -2225,11 +2175,8 @@ define i32 @test_sdot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: smull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -2270,17 +2217,13 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: smull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: saddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v16i8:
@@ -2336,36 +2279,21 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: smull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: saddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
@@ -2436,61 +2364,33 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: sshll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: smull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: smull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: smull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: smull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: smlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: smlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: smlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: smlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: smull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: smull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: smull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: saddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: saddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: saddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
@@ -2549,18 +2449,27 @@ define i32 @test_udot_v8i8_multi_use(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-DOT-NEXT: add w0, w8, w9
; CHECK-SD-DOT-NEXT: ret
;
-; CHECK-GI-LABEL: test_udot_v8i8_multi_use:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-NEXT: mov v3.16b, v2.16b
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: umlal2 v3.4s, v1.8h, v0.8h
-; CHECK-GI-NEXT: addv s0, v3.4s
-; CHECK-GI-NEXT: fmov w9, s0
-; CHECK-GI-NEXT: add w0, w9, w8
-; CHECK-GI-NEXT: ret
+; CHECK-GI-BASE-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v0.8h
+; CHECK-GI-BASE-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: umull v3.8h, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: udot v2.2s, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: ushll v0.4s, v3.4h, #0
+; CHECK-GI-DOT-NEXT: fmov w9, s0
+; CHECK-GI-DOT-NEXT: addp v1.2s, v2.2s, v2.2s
+; CHECK-GI-DOT-NEXT: fmov w8, s1
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
+; CHECK-GI-DOT-NEXT: ret
entry:
%0 = zext <8 x i8> %a to <8 x i32>
%1 = zext <8 x i8> %b to <8 x i32>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
index eafad58..2226fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
@@ -157,7 +157,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
- %r2 = frem half %r0, %r1
+ %r2 = frem afn half %r0, %r1
store half %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -311,7 +311,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
- %r2 = frem float %r0, %r1
+ %r2 = frem afn float %r0, %r1
store float %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -489,7 +489,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
ptr addrspace(1) %in2) #1 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
- %r2 = frem double %r0, %r1
+ %r2 = frem afn double %r0, %r1
store double %r2, ptr addrspace(1) %out, align 8
ret void
}
@@ -1140,5 +1140,5 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
index 1f9c059..3fa73c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
@@ -2,9 +2,8 @@
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -enable-unsafe-fp-math -o - %s | FileCheck -check-prefix=GFX9-UNSAFE %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
---
name: test_fdiv_s16
@@ -99,17 +98,56 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX10-LABEL: name: test_fdiv_s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -120,6 +158,90 @@ body: |
...
---
+name: test_fdiv_s16_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s16_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[FPEXT]], [[INT]]
+ ; SI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+ ; SI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s16_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s16_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s16_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = afn G_FDIV %2, %3
+ %5:_(s32) = G_ANYEXT %4
+ $vgpr0 = COPY %5
+...
+
+---
name: test_fdiv_s32_denorms_on
machineFunctionInfo:
mode:
@@ -192,15 +314,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_on
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_on
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -220,6 +333,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -227,6 +360,70 @@ body: |
...
---
+name: test_fdiv_s32_denorms_on_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+
+---
name: test_fdiv_s32_denorms_off
machineFunctionInfo:
mode:
@@ -305,15 +502,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -335,6 +523,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -342,6 +552,69 @@ body: |
...
---
+name: test_fdiv_s32_denorms_off_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: false
+ fp32-output-denormals: false
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+---
name: test_fdiv_s32_denorms_off_arcp
machineFunctionInfo:
mode:
@@ -420,15 +693,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off_arcp
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off_arcp
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -450,6 +714,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_arcp
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = arcp G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = arcp G_FDIV %0, %1
@@ -536,23 +822,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -572,6 +841,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_FDIV %0, %1
@@ -708,20 +997,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -760,6 +1035,45 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_FDIV %0, %1
@@ -877,20 +1191,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32_flags
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32_flags
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -925,6 +1225,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32_flags
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = nnan G_FDIV %0, %1
@@ -1078,22 +1413,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV4]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV5]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[FMUL2]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v3s32
; GFX10: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
; GFX10-NEXT: {{ $}}
@@ -1140,6 +1459,53 @@ body: |
; GFX10-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s32
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; GFX11-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV3]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV4]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[INT14:%[0-9]+]]:_(s32), [[INT15:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 0
+ ; GFX11-NEXT: [[INT16:%[0-9]+]]:_(s32), [[INT17:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 1
+ ; GFX11-NEXT: [[INT18:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT14]](s32)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[INT14]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[INT18]], [[C]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FMA10]], [[INT18]], [[INT18]]
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT16]], [[FMA11]]
+ ; GFX11-NEXT: [[FMA12:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL2]], [[INT16]]
+ ; GFX11-NEXT: [[FMA13:%[0-9]+]]:_(s32) = G_FMA [[FMA12]], [[FMA11]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA14:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA13]], [[INT16]]
+ ; GFX11-NEXT: [[INT19:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA14]](s32), [[FMA11]](s32), [[FMA13]](s32), [[INT17]](s1)
+ ; GFX11-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
%2:_(<3 x s32>) = G_FDIV %0, %1
@@ -1271,35 +1637,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[UV2]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[UV]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[UV3]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[INT1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA7]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FMA8]], [[FMA7]], [[FMA7]]
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[FMA9]]
- ; GFX9-UNSAFE-NEXT: [[FMA10:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[UV1]]
- ; GFX9-UNSAFE-NEXT: [[FMA11:%[0-9]+]]:_(s64) = G_FMA [[FMA10]], [[FMA9]], [[FMUL1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FMA5]](s64), [[FMA11]](s64)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s64
; GFX10: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
; GFX10-NEXT: {{ $}}
@@ -1334,6 +1671,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s64
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT9]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA6]], [[C]]
+ ; GFX11-NEXT: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[INT10]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL1]](s64), [[INT11]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_FDIV %0, %1
@@ -1502,26 +1874,92 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC2]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v2s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC2]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC3]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV2]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV3]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_FDIV %0, %1
@@ -1756,37 +2194,133 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
; GFX9-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL2]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
- ; GFX9-UNSAFE-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ; GFX10-LABEL: name: test_fdiv_v3s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC3]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC4]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC5]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX10-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX10-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX11-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX11-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s16>) = G_IMPLICIT_DEF
%1:_(<3 x s16>) = G_IMPLICIT_DEF
%2:_(<3 x s16>) = G_FDIV %0, %1
@@ -2094,42 +2628,164 @@ body: |
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v4s16
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC6]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC7]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[INT3]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL2]](s16), [[FMUL3]](s16)
- ; GFX9-UNSAFE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v4s16
+ ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC4]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC5]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC6]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; GFX10-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX10-NEXT: [[FMUL15:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX10-NEXT: [[FMUL16:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FMUL15]]
+ ; GFX10-NEXT: [[FADD12:%[0-9]+]]:_(s32) = G_FADD [[FMUL16]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL17:%[0-9]+]]:_(s32) = G_FMUL [[FADD12]], [[INT6]]
+ ; GFX10-NEXT: [[FADD13:%[0-9]+]]:_(s32) = G_FADD [[FMUL17]], [[FMUL15]]
+ ; GFX10-NEXT: [[FMUL18:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FADD13]]
+ ; GFX10-NEXT: [[FADD14:%[0-9]+]]:_(s32) = G_FADD [[FMUL18]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL19:%[0-9]+]]:_(s32) = G_FMUL [[FADD14]], [[INT6]]
+ ; GFX10-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL19]], [[C1]]
+ ; GFX10-NEXT: [[FADD15:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FADD13]]
+ ; GFX10-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD15]](s32)
+ ; GFX10-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[TRUNC7]](s16), [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v4s16
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+ ; GFX11-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX11-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMUL6]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FMA9]], [[INT6]], [[FMUL6]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMA10]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FMA11]], [[INT6]]
+ ; GFX11-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL7]], [[C]]
+ ; GFX11-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FMA10]]
+ ; GFX11-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[UV7]](s16), [[UV3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_FDIV %0, %1
@@ -2185,15 +2841,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2202,6 +2849,15 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2261,16 +2917,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2280,6 +2926,16 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2351,13 +3007,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2376,6 +3025,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2448,14 +3116,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2475,6 +3135,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2558,22 +3238,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2592,6 +3256,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double 1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
@@ -2678,23 +3361,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2714,6 +3380,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double -1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
index b992506..00c6656 100644
--- a/llvm/test/CodeGen/AMDGPU/add-max.ll
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -5,9 +5,7 @@
define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -18,9 +16,7 @@ define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_svv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v1
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, v1
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -29,12 +25,17 @@ define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
-; GCN-LABEL: add_max_u32_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_add_co_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_ssv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, s0, s1, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_ssv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
%ret = bitcast i32 %max to float
@@ -58,9 +59,7 @@ define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c
define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
; GCN-LABEL: add_max_u32_vsi:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 4, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, s0, 4
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 4)
@@ -71,9 +70,7 @@ define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
; GCN-LABEL: add_max_u32_svl:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 0x64, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, 0x64
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 100)
@@ -82,12 +79,17 @@ define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
}
define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
-; GCN-LABEL: add_max_u32_slv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_addk_co_i32 s0, 0x64
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_slv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, 0x64, s0, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_slv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_addk_co_i32 s0, 0x64
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, 100
%max = call i32 @llvm.umax.i32(i32 %add, i32 %b)
%ret = bitcast i32 %max to float
@@ -97,9 +99,7 @@ define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
@@ -110,9 +110,7 @@ define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
@@ -123,9 +121,7 @@ define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
new file mode 100644
index 0000000..fcbf7ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -show-mc-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+define i16 @cvt_pk_bf8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_bf8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_bf8_f16 v0, 0x38003800 ; encoding: [0x00,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x38]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0xH3800, half 0xH3800>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
+
+define i16 @cvt_pk_fp8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_fp8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_fp8_f16 v0, 0x3800 ; encoding: [0x00,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0xH3800, half 0xH0>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 9ae9d19..210e09f 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1702,7 +1702,7 @@ entry:
%gep.r = getelementptr inbounds half, ptr addrspace(1) %r, i64 %tid.ext
%a.val = load volatile half, ptr addrspace(1) %gep.a
%b.val = load volatile half, ptr addrspace(1) %gep.b
- %r.val = fdiv half %a.val, %b.val
+ %r.val = fdiv afn half %a.val, %b.val
store half %r.val, ptr addrspace(1) %gep.r
ret void
}
@@ -2475,4 +2475,4 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half>) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 57b4857..c52fb61 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -11,6 +11,10 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SDAG-FAKE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-TRUE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-FAKE16 %s
define amdgpu_kernel void @fptrunc_f32_to_f16(
; SI-SDAG-LABEL: fptrunc_f32_to_f16:
@@ -192,6 +196,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -381,6 +418,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_afn(ptr addrspace(1) %r,
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %a) {
entry:
%a.val = load float, ptr addrspace(1) %a
@@ -1089,6 +1159,130 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s7, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s9, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s2, s7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s2, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s2, s2, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s7, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s2, s2, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 0x7c00, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1101,62 +1295,21 @@ entry:
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; SI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v1
-; SI-SDAG-NEXT: s_and_b32 s6, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s7, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s8, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; SI-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s6, s6, s7
-; SI-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; SI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; SI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s6, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; SI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s7
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s6, s1, s0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1174,62 +1327,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v1
-; VI-SDAG-NEXT: s_and_b32 s5, s4, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s5, v0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s8, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_and_b32 s5, s7, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s5, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; VI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; VI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; VI-SDAG-NEXT: s_or_b32 s9, s5, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; VI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s5, 0
-; VI-SDAG-NEXT: s_cselect_b32 s5, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s7
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s5
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1247,62 +1359,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX9-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX9-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX9-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1320,62 +1391,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX950-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX950-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX950-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX950-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1401,60 +1431,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -1468,60 +1451,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -1552,6 +1488,40 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1769,6 +1739,38 @@ define amdgpu_kernel void @fptrunc_v2f32_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3014,6 +3016,225 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s9, 0x3f1, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s5, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s5, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s2, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s8, 1, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s8, s8, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s9, s2, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s8, s8, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s4, s4, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s11, s10, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s4, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s8, s10
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s2, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s2, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s2, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s10, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s6, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s4, s7, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3026,106 +3247,25 @@ entry:
define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; SI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v3
-; SI-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; SI-SDAG-NEXT: s_and_b32 s7, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s8, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s9, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; SI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; SI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; SI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; SI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; SI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; SI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; SI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s7, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; SI-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; SI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; SI-SDAG-NEXT: s_or_b32 s1, s1, s7
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; SI-SDAG-NEXT: s_lshl_b32 s1, s1, 16
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s8
-; SI-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; SI-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s0, s6, s0
-; SI-SDAG-NEXT: s_and_b32 s0, s0, 0xffff
-; SI-SDAG-NEXT: s_or_b32 s6, s0, s1
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; SI-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3147,106 +3287,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; VI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v3
-; VI-SDAG-NEXT: s_and_b32 s7, s4, 0x1ff
-; VI-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; VI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; VI-SDAG-NEXT: s_lshr_b32 s8, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s9, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; VI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; VI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; VI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s7, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; VI-SDAG-NEXT: s_and_b32 s8, s5, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_lshr_b32 s9, s5, 8
-; VI-SDAG-NEXT: s_bfe_u32 s10, s5, 0xb0014
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; VI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; VI-SDAG-NEXT: s_lshl_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, s8
-; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s6
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3267,104 +3325,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX9-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX9-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX9-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX9-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX9-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX9-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX9-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3385,104 +3363,22 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX950-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX950-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX950-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX950-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX950-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX950-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX950-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX950-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3511,109 +3407,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v1, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_pack_b32_f16 v0, v0.h, v0.l
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -3627,109 +3431,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -3768,6 +3480,46 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v1, s[6:7]
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3957,6 +3709,42 @@ define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4147,6 +3935,42 @@ define amdgpu_kernel void @fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4337,6 +4161,42 @@ define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset1_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4536,6 +4396,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4735,6 +4631,45 @@ define amdgpu_kernel void @fptrunc_fabs_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4943,6 +4878,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_sext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 4f8eab1..5d31177 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -226,59 +226,59 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: s_mov_b32 s0, s4
+; VI-SDAG-NEXT: s_lshr_b32 s4, s7, 8
+; VI-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
+; VI-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
+; VI-SDAG-NEXT: s_or_b32 s4, s4, s6
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_mov_b32 s1, s5
+; VI-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v0
+; VI-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
+; VI-SDAG-NEXT: s_or_b32 s4, s8, s4
+; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
+; VI-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
+; VI-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v0
+; VI-SDAG-NEXT: s_lshr_b32 s9, s5, s8
+; VI-SDAG-NEXT: s_lshl_b32 s8, s9, s8
+; VI-SDAG-NEXT: s_cmp_lg_u32 s8, s5
+; VI-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; VI-SDAG-NEXT: s_addk_i32 s6, 0xfc10
+; VI-SDAG-NEXT: s_lshl_b32 s8, s6, 12
+; VI-SDAG-NEXT: s_or_b32 s5, s9, s5
+; VI-SDAG-NEXT: s_or_b32 s8, s4, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 1
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s8
+; VI-SDAG-NEXT: s_and_b32 s8, s5, 7
+; VI-SDAG-NEXT: s_cmp_gt_i32 s8, 5
+; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
+; VI-SDAG-NEXT: s_cmp_eq_u32 s8, 3
+; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
+; VI-SDAG-NEXT: s_or_b32 s8, s8, s9
+; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; VI-SDAG-NEXT: s_add_i32 s5, s5, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 31
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
+; VI-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, s5
+; VI-SDAG-NEXT: s_lshr_b32 s5, s7, 16
+; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
+; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
+; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16:
; VI-GISEL: ; %bb.0:
@@ -331,68 +331,57 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX10-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX10-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX10-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX10-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX10-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX10-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX10-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX10-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX10-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX10-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX10-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX10-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX10-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX10-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX10-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX10-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX10-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX10-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX10-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX10-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX10-GISEL: ; %bb.0:
@@ -445,76 +434,65 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX11-SAFE-SDAG: ; %bb.0:
-; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-SAFE-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX11-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX11-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX11-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX11-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX11-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX11-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX11-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX11-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX11-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX11-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX11-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX11-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX11-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX11-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX11-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX11-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
+; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX11-GISEL: ; %bb.0:
@@ -570,30 +548,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-GISEL-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-TRUE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-TRUE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-FAKE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-FAKE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_endpgm
%result = fptrunc double %in to half
%result_i16 = bitcast half %result to i16
store i16 %result_i16, ptr addrspace(1) %out
@@ -603,111 +557,27 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double %in) {
; SI-LABEL: fptrunc_f64_to_f16_afn:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_movk_i32 s2, 0x7e00
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s0, s7, 8
-; SI-NEXT: s_and_b32 s1, s7, 0x1ff
-; SI-NEXT: s_and_b32 s8, s0, 0xffe
-; SI-NEXT: s_or_b32 s0, s1, s6
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: s_bfe_u32 s0, s7, 0xb0014
-; SI-NEXT: v_readfirstlane_b32 s1, v0
-; SI-NEXT: s_sub_i32 s6, 0x3f1, s0
-; SI-NEXT: s_or_b32 s1, s8, s1
-; SI-NEXT: v_med3_i32 v0, s6, 0, 13
-; SI-NEXT: s_or_b32 s6, s1, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_lshr_b32 s9, s6, s8
-; SI-NEXT: s_lshl_b32 s8, s9, s8
-; SI-NEXT: s_cmp_lg_u32 s8, s6
-; SI-NEXT: s_cselect_b32 s6, 1, 0
-; SI-NEXT: s_addk_i32 s0, 0xfc10
-; SI-NEXT: s_or_b32 s6, s9, s6
-; SI-NEXT: s_lshl_b32 s8, s0, 12
-; SI-NEXT: s_or_b32 s8, s1, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 1
-; SI-NEXT: s_cselect_b32 s6, s6, s8
-; SI-NEXT: s_and_b32 s8, s6, 7
-; SI-NEXT: s_cmp_gt_i32 s8, 5
-; SI-NEXT: s_cselect_b32 s9, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s8, 3
-; SI-NEXT: s_cselect_b32 s8, 1, 0
-; SI-NEXT: s_lshr_b32 s6, s6, 2
-; SI-NEXT: s_or_b32 s8, s8, s9
-; SI-NEXT: s_add_i32 s6, s6, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 31
-; SI-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s1, 0
-; SI-NEXT: s_cselect_b32 s1, s2, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s0, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s1, s6
-; SI-NEXT: s_lshr_b32 s1, s7, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s6, s1, s0
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; VI-GISEL: ; %bb.0:
@@ -720,68 +590,16 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; GFX10-GISEL: ; %bb.0:
@@ -794,74 +612,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX11-SAFE-SDAG: ; %bb.0:
; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-SAFE-SDAG-NEXT: s_endpgm
;
@@ -1833,4 +1592,8 @@ define amdgpu_kernel void @fptrunc_v8f64_to_v8f32_afn(ptr addrspace(1) %out, <8
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10-SAFE-GISEL: {{.*}}
+; GFX10-SAFE-SDAG: {{.*}}
+; GFX10-UNSAFE-SDAG: {{.*}}
; VI-SAFE-GISEL: {{.*}}
+; VI-SAFE-SDAG: {{.*}}
+; VI-UNSAFE-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 87c7cce..f81950b 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -1294,13 +1294,13 @@ define float @v_sqrt_f32__enough_unsafe_attrs(float %x) #3 {
ret float %result
}
-define float @v_sqrt_f32__unsafe_attr(float %x) #4 {
+define float @v_sqrt_f32__unsafe_attr(float %x) {
; GCN-LABEL: v_sqrt_f32__unsafe_attr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sqrt_f32_e32 v0, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = call nsz float @llvm.sqrt.f32(float %x)
+ %result = call afn nsz float @llvm.sqrt.f32(float %x)
ret float %result
}
@@ -4763,7 +4763,6 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #2 = { "approx-func-fp-math"="true" }
attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
attributes #5 = { "no-infs-fp-math"="true" }
!0 = !{float 0.5}
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
new file mode 100644
index 0000000..892955c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
@@ -0,0 +1,98 @@
+; RUN: not llc -mtriple=amdgcn-amd-amdhsa -mcpu=bonaire -filetype=null %s 2>&1 | FileCheck -implicit-check-not=error %s
+
+; CHECK: error: couldn't allocate output register for constraint '{v256}'
+define void @out_of_bounds_vgpr32_def() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v256}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_def_high_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[255:256]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_def_low_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[256:257]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v256}'
+define void @out_of_bounds_vgpr32_use() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 %0, %1", "=v,{v256}"(i32 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_high_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[255:256]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_low_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[256:257]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[1:0]}'
+define void @vgpr_tuple_swapped() {
+ tail call void asm sideeffect "; use %0", "{v[1:0]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967295}'
+define void @vgpr_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v4294967295}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967296}'
+define void @vgpr_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v4294967296}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967295:4294967296]}'
+define void @vgpr_tuple_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967295:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967295]}'
+define void @vgpr_tuple_0_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967296]}'
+define void @vgpr_tuple_0_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967264:4294967295]}'
+define void @vgpr32_last_is_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967264:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967265:4294967296]}'
+define void @vgpr32_last_is_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[4294967265:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2:2147483651]}'
+define void @overflow_bitwidth_0() {
+ tail call void asm sideeffect "; use %0", "{v[2:2147483651]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2147483635:2147483651]}'
+define void @overflow_bitwidth_1() {
+ tail call void asm sideeffect "; use %0", "{v[2147483635:2147483651]}"(i64 123)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
index 43c8d83..fd51759 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
@@ -1,10 +1,188 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+declare i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float, float, i32, i1)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float, i32, i32, i32)
declare float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32, i32)
+define i32 @test_cvt_pk_fp8_f32_word0(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.l, v0, v1 clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 false)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_pk_fp8_f32_word1(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 true)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_pk_fp8_f32_word1_dpp(i32 %a, float %y, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %tmp1, float %y, i32 %old, i1 true)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte0(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 0)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte1(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 1)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte2(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:2 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 2)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte3(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:3 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 3)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_sr_fp8_f32_byte1_dpp(i32 %a, i32 %r, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %tmp1, i32 %r, i32 %old, i32 1)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
define float @test_cvt_f32_fp8_e5m3_byte0(i32 %a) {
; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte0:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
new file mode 100644
index 0000000..6ccfad7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
@@ -0,0 +1,539 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-REAL16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-REAL16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-FAKE16 %s
+
+declare i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half>)
+declare i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half>)
+declare i32 @llvm.amdgcn.cvt.sr.bf8.f16(half, i32, i32, i32)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f16(half, i32, i32, i32)
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
new file mode 100644
index 0000000..2179800
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+declare <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
new file mode 100644
index 0000000..4309cfbe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_bf8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_bf8 v[4:7], v[0:1], v2 scale_sel:2
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 2)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_f16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_f16_fp4 v[4:7], v0, v1 scale_sel:3
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 3)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp4 v[4:7], v0, v1 scale_sel:4
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 4)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 7)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 1)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
index 291a4e2..217c306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
@@ -168,7 +168,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -179,7 +179,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -213,7 +213,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -225,7 +225,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -259,7 +259,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -269,7 +269,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -300,7 +300,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -311,7 +311,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -345,7 +345,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -357,7 +357,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -391,7 +391,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -401,7 +401,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -773,7 +773,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -783,7 +783,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -895,7 +895,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -905,7 +905,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -935,7 +935,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -945,7 +945,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
@@ -1602,7 +1602,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1613,7 +1613,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1647,7 +1647,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1659,7 +1659,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1693,7 +1693,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1703,7 +1703,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -1734,7 +1734,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1745,7 +1745,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1779,7 +1779,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1791,7 +1791,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1825,7 +1825,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1835,7 +1835,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -2032,7 +2032,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -2042,7 +2042,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -2112,7 +2112,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -2122,7 +2122,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -2152,7 +2152,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -2162,7 +2162,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
new file mode 100644
index 0000000..82991ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; xUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: GlobalISel does not work with bf16
+
+declare <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 425a853..477f0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -51,7 +51,7 @@ define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(ptr addrspace(1) %out,
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(ptr addrspace(1) %out, float %src) #4 {
- %rcp = fdiv float 1.0, %src, !fpmath !0
+ %rcp = fdiv afn float 1.0, %src, !fpmath !0
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -105,8 +105,8 @@ define amdgpu_kernel void @safe_rsq_rcp_pat_amdgcn_sqrt_f32_nocontract(ptr addrs
; SI: v_sqrt_f32_e32
; SI: v_rcp_f32_e32
define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(ptr addrspace(1) %out, float %src) #2 {
- %sqrt = call float @llvm.sqrt.f32(float %src)
- %rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
+ %sqrt = call afn float @llvm.sqrt.f32(float %src)
+ %rcp = call afn float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -148,7 +148,7 @@ define amdgpu_kernel void @rcp_pat_f64(ptr addrspace(1) %out, double %src) #1 {
; SI: v_fma_f64
; SI: v_fma_f64
define amdgpu_kernel void @unsafe_rcp_pat_f64(ptr addrspace(1) %out, double %src) #2 {
- %rcp = fdiv double 1.0, %src
+ %rcp = fdiv afn double 1.0, %src
store double %rcp, ptr addrspace(1) %out, align 8
ret void
}
@@ -214,9 +214,9 @@ define amdgpu_kernel void @unsafe_amdgcn_sqrt_rsq_rcp_pat_f64(ptr addrspace(1) %
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #3 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 8c1e166..7151fee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -3227,72 +3227,6 @@ define float @v_exp_f32_fast(float %in) {
ret float %result
}
-define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp.f32(float %in)
- ret float %result
-}
-
define float @v_exp_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index edc505b..918b1b2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -3235,78 +3235,6 @@ define float @v_exp10_f32_fast(float %in) {
ret float %result
}
-define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp10.f32(float %in)
- ret float %result
-}
-
define float @v_exp10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp10_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index 38d1b47..307fa89 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -3076,121 +3076,6 @@ define float @v_log_f32_fast(float %in) {
ret float %result
}
-define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3f317218, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc1b17218, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3f317218, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc1b17218, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3f317218, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log.f32(float %in)
- ret float %result
-}
-
define float @v_log_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 058933f..5278589 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -3076,121 +3076,6 @@ define float @v_log10_f32_fast(float %in) {
ret float %result
}
-define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc11a209b, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3e9a209b, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc11a209b, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3e9a209b, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log10.f32(float %in)
- ret float %result
-}
-
define float @v_log10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log10_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index 1e6b77e..702a69f 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -77,7 +77,7 @@ define amdgpu_kernel void @copy_flat(ptr nocapture %d, ptr nocapture readonly %s
; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 16
; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1]
+; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
; GFX1250-NEXT: s_cbranch_scc1 .LBB0_2
@@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
@@ -490,7 +490,7 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7]
+; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 16, v[0:1]
; GFX1250-NEXT: s_cbranch_scc1 .LBB4_2
@@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
index be02045..4c0ab91 100644
--- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
+++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
@@ -6982,7 +6982,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0x800
; CHECK-NEXT: s_cbranch_scc1 .LBB6_2
-; CHECK-NEXT: .LBB6_3: ; %Flow9
+; CHECK-NEXT: .LBB6_3: ; %Flow7
; CHECK-NEXT: s_andn2_saveexec_b32 s8, s6
; CHECK-NEXT: s_cbranch_execz .LBB6_6
; CHECK-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -7048,7 +7048,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; CHECK-NEXT: s_cbranch_scc0 .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow10
+; CHECK-NEXT: .LBB6_6: ; %Flow8
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
@@ -7689,7 +7689,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc1 .LBB6_2
-; ALIGNED-NEXT: .LBB6_3: ; %Flow9
+; ALIGNED-NEXT: .LBB6_3: ; %Flow7
; ALIGNED-NEXT: s_andn2_saveexec_b32 s8, s6
; ALIGNED-NEXT: s_cbranch_execz .LBB6_6
; ALIGNED-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -8316,7 +8316,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc0 .LBB6_5
-; ALIGNED-NEXT: .LBB6_6: ; %Flow10
+; ALIGNED-NEXT: .LBB6_6: ; %Flow8
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
; ALIGNED-NEXT: s_clause 0x7
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
@@ -8369,7 +8369,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:2032
; UNROLL3-NEXT: ; implicit-def: $vgpr2_vgpr3
; UNROLL3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; UNROLL3-NEXT: .LBB6_4: ; %Flow7
+; UNROLL3-NEXT: .LBB6_4: ; %Flow5
; UNROLL3-NEXT: s_andn2_saveexec_b32 s8, s6
; UNROLL3-NEXT: s_cbranch_execz .LBB6_7
; UNROLL3-NEXT: ; %bb.5: ; %memmove_bwd_residual
@@ -8403,7 +8403,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[16:17], v[12:15], off offset:32
; UNROLL3-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; UNROLL3-NEXT: s_cbranch_scc0 .LBB6_6
-; UNROLL3-NEXT: .LBB6_7: ; %Flow8
+; UNROLL3-NEXT: .LBB6_7: ; %Flow6
; UNROLL3-NEXT: s_or_b32 exec_lo, exec_lo, s8
; UNROLL3-NEXT: s_setpc_b64 s[30:31]
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index 272daa9..dd5c247 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -460,10 +460,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB3_3
-; CHECK-NEXT: ; %bb.1: ; %Flow34
+; CHECK-NEXT: ; %bb.1: ; %Flow36
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_10
-; CHECK-NEXT: .LBB3_2: ; %Flow35
+; CHECK-NEXT: .LBB3_2: ; %Flow37
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -494,7 +494,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_5
-; CHECK-NEXT: .LBB3_6: ; %Flow29
+; CHECK-NEXT: .LBB3_6: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB3_9
@@ -520,7 +520,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_8
-; CHECK-NEXT: .LBB3_9: ; %Flow27
+; CHECK-NEXT: .LBB3_9: ; %Flow29
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
@@ -556,7 +556,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB3_12
-; CHECK-NEXT: .LBB3_13: ; %Flow33
+; CHECK-NEXT: .LBB3_13: ; %Flow35
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB3_16
@@ -584,7 +584,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_15
-; CHECK-NEXT: .LBB3_16: ; %Flow31
+; CHECK-NEXT: .LBB3_16: ; %Flow33
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
@@ -907,10 +907,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB6_3
-; CHECK-NEXT: ; %bb.1: ; %Flow41
+; CHECK-NEXT: ; %bb.1: ; %Flow39
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_10
-; CHECK-NEXT: .LBB6_2: ; %Flow42
+; CHECK-NEXT: .LBB6_2: ; %Flow40
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB6_3: ; %memmove_copy_forward
@@ -940,7 +940,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow36
+; CHECK-NEXT: .LBB6_6: ; %Flow34
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB6_9
@@ -966,11 +966,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_8
-; CHECK-NEXT: .LBB6_9: ; %Flow34
+; CHECK-NEXT: .LBB6_9: ; %Flow32
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1002,15 +1002,15 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB6_12
-; CHECK-NEXT: .LBB6_13: ; %Flow40
+; CHECK-NEXT: .LBB6_13: ; %Flow38
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB6_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop
@@ -1030,7 +1030,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_15
-; CHECK-NEXT: .LBB6_16: ; %Flow38
+; CHECK-NEXT: .LBB6_16: ; %Flow36
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -1181,8 +1181,8 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB8_9: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1219,10 +1219,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB8_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop
diff --git a/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
new file mode 100644
index 0000000..c4a48a46
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
@@ -0,0 +1,34 @@
+; REQUIRES: asserts
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -debug-only=gcn-subtarget < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s 2>&1 | FileCheck -check-prefixes=WARNING %s
+
+; CHECK: Post-MI-sched direction (postra-sched-topdown): topdown
+define float @postra-sched-topdown(float %input) nounwind #0 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bottomup): bottomup
+define float @postra-sched-bottomup(float %input) nounwind #1 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bidirectional): bidirectional
+define float @postra-sched-bidirectional(float %input) nounwind #2 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-warning): topdown
+; WARNING: invalid value for postRA direction attribute
+define float @postra-sched-warning(float %input) nounwind #3 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+attributes #0 = {"amdgpu-post-ra-direction"="topdown"}
+attributes #1 = {"amdgpu-post-ra-direction"="bottomup"}
+attributes #2 = {"amdgpu-post-ra-direction"="bidirectional"}
+attributes #3 = {"amdgpu-post-ra-direction"="warning"}
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420e..9f0ffbc 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -56,7 +56,7 @@ define float @v_rcp_f32_ieee_unsafe(float %x) #4 {
; R600: ; %bb.0:
; R600-NEXT: CF_END
; R600-NEXT: PAD
- %rcp = fdiv float 1.0, %x
+ %rcp = fdiv afn float 1.0, %x
ret float %rcp
}
@@ -1411,10 +1411,10 @@ define amdgpu_kernel void @s_div_arcp_neg_k_x_pat_f32_daz(ptr addrspace(1) %out)
declare float @llvm.fabs.f32(float) #1
declare float @llvm.sqrt.f32(float) #1
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
index f7e0388..f967e95 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -65,8 +65,8 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) noalias %out, ptr addrspace(
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -103,8 +103,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(ptr addrspace(1) noalias %out, float %va
; GCN-UNSAFE-NEXT: s_mov_b32 s2, -1
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-UNSAFE-NEXT: s_endpgm
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -196,7 +196,7 @@ define amdgpu_kernel void @rsqrt_fmul(ptr addrspace(1) %out, ptr addrspace(1) %i
%x = call contract float @llvm.sqrt.f32(float %a)
%y = fmul contract float %x, %b
- %z = fdiv arcp contract float %c, %y
+ %z = fdiv arcp afn contract float %c, %y
store float %z, ptr addrspace(1) %out.gep
ret void
}
@@ -258,8 +258,8 @@ define amdgpu_kernel void @neg_rsq_f32(ptr addrspace(1) noalias %out, ptr addrsp
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -322,8 +322,8 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -343,8 +343,8 @@ define float @v_neg_rsq_neg_f32(float %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -367,8 +367,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg <2 x float> %val
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -387,8 +387,8 @@ define float @v_neg_rsq_neg_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.neg = fneg float %val0
- %sqrt = call contract float @llvm.sqrt.f32(float %val0.neg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0.neg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -412,8 +412,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32_foldable_user(<2 x float> %val0, <2 x fl
; GCN-IEEE-NEXT: v_mul_f32_e64 v1, -v1, v3
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.fneg = fneg <2 x float> %val0
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -432,8 +432,8 @@ define float @v_neg_rsq_f32(float %val) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -455,8 +455,8 @@ define <2 x float> @v_neg_rsq_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -474,8 +474,8 @@ define float @v_neg_rsq_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -643,8 +643,8 @@ define <2 x float> @v_neg_rsq_v2f32_foldable_user(<2 x float> %val0, <2 x float>
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v0, v0, v2
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v1, v1, v3
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -672,8 +672,8 @@ define float @v_rsq_f32(float %val) {
; GCN-IEEE-SAFE-NEXT: v_cndmask_b32_e64 v1, 0, 12, vcc
; GCN-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v0, v1
; GCN-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -756,9 +756,9 @@ define { float, float } @v_rsq_f32_multi_use(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v2, vcc, 0, v2
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v1, v1, v2
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
%insert.0 = insertvalue { float, float } poison, float %sqrt, 0
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
%insert.1 = insertvalue { float, float } %insert.0, float %div, 1
ret { float, float } %insert.1
}
@@ -838,8 +838,8 @@ define float @v_rsq_f32_missing_contract0(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v0, vcc, 0, v0
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v1, v0
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -855,8 +855,8 @@ define float @v_rsq_f32_missing_contract1(float %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -876,8 +876,8 @@ define float @v_rsq_f32_contractable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -897,8 +897,8 @@ define float @v_rsq_f32_contractable_user_missing_contract0(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -918,8 +918,8 @@ define float @v_rsq_f32_contractable_user_missing_contract1(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd float %div, %val1
ret float %add
}
@@ -953,8 +953,8 @@ define float @v_rsq_f32_known_never_posdenormal(float nofpclass(psub) %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
index b78cbb0..4aac193 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
@@ -4504,7 +4504,7 @@ define <2 x double> @v_rsq_v2f64__afn_nnan_ninf(<2 x double> %x) {
ret <2 x double> %rsq
}
-define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
+define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) {
; SI-SDAG-LABEL: s_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0
@@ -4648,8 +4648,8 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
; VI-GISEL-NEXT: v_readfirstlane_b32 s0, v0
; VI-GISEL-NEXT: v_readfirstlane_b32 s1, v1
; VI-GISEL-NEXT: ; return to shader part epilog
- %rsq = call contract double @llvm.sqrt.f64(double %x)
- %result = fdiv contract double 1.0, %rsq
+ %rsq = call contract afn double @llvm.sqrt.f64(double %x)
+ %result = fdiv contract afn double 1.0, %rsq
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
@@ -4660,7 +4660,7 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
ret <2 x i32> %insert.1
}
-define double @v_rsq_f64_unsafe(double %x) #0 {
+define double @v_rsq_f64_unsafe(double %x) {
; SI-SDAG-LABEL: v_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4800,8 +4800,8 @@ define double @v_rsq_f64_unsafe(double %x) #0 {
; VI-GISEL-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], 1.0
; VI-GISEL-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[2:3]
; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call double @llvm.sqrt.f64(double %x)
- %rsq = fdiv double 1.0, %sqrt
+ %sqrt = call afn contract double @llvm.sqrt.f64(double %x)
+ %rsq = fdiv afn contract double 1.0, %sqrt
ret double %rsq
}
@@ -5737,7 +5737,6 @@ define double @v_div_const_contract_sqrt_f64(double %x) {
ret double %rsq
}
-attributes #0 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
; GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/udivrem24.ll b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
index 5477d62..1e5ec59 100644
--- a/llvm/test/CodeGen/AMDGPU/udivrem24.ll
+++ b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
@@ -1,18 +1,103 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s
-; FUNC-LABEL: {{^}}udiv24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -21,17 +106,101 @@ define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: udiv24_i8_denorm_flush_in_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -40,17 +209,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out,
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; SI-LABEL: udiv24_i8_denorm_flush_in:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -59,17 +312,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #2 {
+; SI-LABEL: udiv24_i8_denorm_flush_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -78,17 +415,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -97,17 +518,85 @@ define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv23_i32:
-; SI: v_cvt_f32_u32
-; SI-DAG: v_cvt_f32_u32
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -120,11 +609,88 @@ define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -137,11 +703,88 @@ define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u23_u24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u23_u24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u23_u24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u23_u24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -154,11 +797,88 @@ define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u24_u23_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u24_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u24_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u24_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -171,14 +891,113 @@ define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}udiv25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -191,14 +1010,113 @@ define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -211,14 +1129,113 @@ define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -231,17 +1248,107 @@ define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}urem24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v3, v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v4, v1
+; VI-NEXT: v_mul_f32_e32 v3, v4, v3
+; VI-NEXT: v_trunc_f32_e32 v3, v3
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
+; VI-NEXT: v_mad_f32 v3, -v3, v2, v4
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -250,17 +1357,107 @@ define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}urem24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v4, v2
+; VI-NEXT: v_mul_f32_e32 v4, v3, v4
+; VI-NEXT: v_trunc_f32_e32 v4, v4
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v4
+; VI-NEXT: v_mad_f32 v3, -v4, v2, v3
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -269,10 +1466,90 @@ define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem24_i32:
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s6, s4, 0xffffff
+; SI-NEXT: s_and_b32 s7, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s6
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s7
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_mul_lo_u32 v0, v0, s5
+; SI-NEXT: v_sub_i32_e32 v0, vcc, s4, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s2, s5, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s2
+; VI-NEXT: s_and_b32 s2, s4, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v0, s5
+; VI-NEXT: v_sub_u32_e32 v0, vcc, s4, v0
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -285,14 +1562,105 @@ define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -305,14 +1673,105 @@ define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0xffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -325,14 +1784,105 @@ define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -345,12 +1895,85 @@ define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u16_u23_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u16_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u16_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u16_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -363,12 +1986,85 @@ define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr ad
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u23_u16_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u23_u16_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u23_u16_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u23_u16_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u23_u16_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
new file mode 100644
index 0000000..ea12732
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
@@ -0,0 +1,16 @@
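+;; Check that a register allocation failure (forced with -stress-regalloc=4) is reported
+;; as an error without leaving bad machine code behind after cleanup of the failed vreg.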
+; RUN: not llc -mcpu=gfx1100 -mtriple=amdgcn-amd-amdhsa -stress-regalloc=4 -filetype=null -verify-machineinstrs %s 2>&1 | FileCheck %s
+
+; CHECK: error: <unknown>:0:0: ran out of registers during register allocation in function 'f'
+; CHECK-NOT: Bad machine code
+
+define <16 x half> @f(i1 %LGV2, <16 x half> %0) {
+BB:
+ br i1 %LGV2, label %SW_C3, label %SW_C
+
+SW_C: ; preds = %BB
+ %B1 = fmul <16 x half> %0, zeroinitializer
+ ret <16 x half> %B1
+
+SW_C3: ; preds = %BB
+ ret <16 x half> <half 0xH0000, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison>
+}
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
index aea2a8b..f2ecfe8 100644
--- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s
define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_i8_i32:
; GFX950: ; %bb.0:
@@ -13,6 +14,20 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_i8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_i8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
@@ -40,6 +55,20 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_u8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_u8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
diff --git a/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..8f7b050
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..05e1e8b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the
+;; type id computed from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..a65e5c5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
index cda1e83..aa3cdc3 100644
--- a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
+++ b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
@@ -1,10 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-;CHECK: _foo
-;CHECK-NOT: vld1.32
-;CHECK-NOT: vst1.32
-;CHECK: bx
define void @foo(ptr %J) {
+; CHECK-LABEL: foo:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
%A = load <16 x i8>, ptr %J
%T1 = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%T2 = shufflevector <8 x i8> %T1, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
index 0f711630..4a65a53 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Descriptor Range type: Invalid
+; CHECK: error: Invalid Descriptor Range type
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
index 6551116..031dfca 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: NOTRootFlags
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
index 579528d..2739320 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: Invalid
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
entry:
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 7e7d56e..855e0c0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MaxLOD: 0
+; CHECK: error: Invalid value for MaxLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index d958f10..812749b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MinLOD: 0
+; CHECK: error: Invalid value for MinLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 34b27eb..6898aec 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MipLODBias: 666
+; CHECK: error: Invalid value for MipLODBias: 6.660000e+02
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
index b23366b..f5430df 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
@@ -1,20 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -verify-machineinstrs < %s | FileCheck %s
@.str = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, 3'RD STRING\00", align 1
@.str1 = private unnamed_addr constant [3 x i8] c"%s\00", align 1
-; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...)
; Function Attrs: nounwind
-define i32 @main() {
+define i32 @main() nounwind {
; CHECK-LABEL: main:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: .cfi_def_cfa r30, 8
-; CHECK-NEXT: .cfi_offset r31, -4
-; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK: // %bb.0: // %entry
; CHECK-NEXT: {
; CHECK-NEXT: r0 = ##.L.str1
; CHECK-NEXT: r3:2 = CONST64(#2325073635944967245)
@@ -53,5 +48,4 @@ entry:
ret i32 0
}
-; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
diff --git a/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..3f7590a
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,91 @@
+;; Test MIR printer and parser for type id field in call site info. Test that
+;; it works well with/without --emit-call-site-info.
+
+;; Multiplex --call-graph-section and -emit-call-site-info as both utilize
+;; CallSiteInfo and callSites.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with --call-graph-section only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is not set, calleeTypeIds is set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section %s -stop-after=finalize-isel -o %t1.mir
+; RUN: cat %t1.mir | FileCheck %s --check-prefix=PRINTER_CGS
+; PRINTER_CGS: name: main
+; PRINTER_CGS: callSites:
+; PRINTER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PRINTER_CGS-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section %t1.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS
+; PARSER_CGS: name: main
+; PARSER_CGS: callSites:
+; PARSER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PARSER_CGS-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with -emit-call-site-info only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is set, calleeTypeIds is not set.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CSI
+; PRINTER_CSI: name: main
+; PRINTER_CSI: callSites:
+; PRINTER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PRINTER_CSI-NOT: calleeTypeIds:
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CSI
+; PARSER_CSI: name: main
+; PARSER_CSI: callSites:
+; PARSER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PARSER_CSI-NOT: calleeTypeIds:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with both -emit-call-site-info and --call-graph-section.
+
+;; Test printer.
+;; Verify both fwdArgRegs and calleeTypeIds are set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CGS_CSI
+; PRINTER_CGS_CSI: name: main
+; PRINTER_CGS_CSI: callSites:
+; PRINTER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PRINTER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS_CSI
+; PARSER_CGS_CSI: name: main
+; PARSER_CGS_CSI: callSites:
+; PARSER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PARSER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..a66a884
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`.
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..e7f162c
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..9f5e858
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/NVPTX/fold-movs.ll b/llvm/test/CodeGen/NVPTX/fold-movs.ll
new file mode 100644
index 0000000..6ee0fb2
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fold-movs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK-F32X2
+; RUN: %if ptxas-12.7 %{ \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+target triple = "nvptx64-nvidia-cuda"
+
+; Since fdiv doesn't support f32x2, this will create BUILD_VECTORs that will be
+; folded into the store, turning it into st.global.v8.b32.
+define void @writevec(<8 x float> %v1, <8 x float> %v2, ptr addrspace(1) %p) {
+; CHECK-F32X2-LABEL: writevec(
+; CHECK-F32X2: {
+; CHECK-F32X2-NEXT: .reg .b32 %r<25>;
+; CHECK-F32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT: // %bb.0:
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [writevec_param_0];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [writevec_param_0+16];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [writevec_param_1+16];
+; CHECK-F32X2-NEXT: div.rn.f32 %r13, %r8, %r12;
+; CHECK-F32X2-NEXT: div.rn.f32 %r14, %r7, %r11;
+; CHECK-F32X2-NEXT: div.rn.f32 %r15, %r6, %r10;
+; CHECK-F32X2-NEXT: div.rn.f32 %r16, %r5, %r9;
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r17, %r18, %r19, %r20}, [writevec_param_1];
+; CHECK-F32X2-NEXT: div.rn.f32 %r21, %r4, %r20;
+; CHECK-F32X2-NEXT: div.rn.f32 %r22, %r3, %r19;
+; CHECK-F32X2-NEXT: div.rn.f32 %r23, %r2, %r18;
+; CHECK-F32X2-NEXT: div.rn.f32 %r24, %r1, %r17;
+; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [writevec_param_2];
+; CHECK-F32X2-NEXT: st.global.v8.b32 [%rd1], {%r24, %r23, %r22, %r21, %r16, %r15, %r14, %r13};
+; CHECK-F32X2-NEXT: ret;
+ %v = fdiv <8 x float> %v1, %v2
+ store <8 x float> %v, ptr addrspace(1) %p, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 06c2cc8..26336b8 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -343,61 +343,77 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smax(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smax_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smax_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smax(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smax_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smax_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sgt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -473,61 +489,77 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smin(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smin_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smin_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smin(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smin_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smin_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sle <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
diff --git a/llvm/test/CodeGen/NVPTX/ld-param-sink.ll b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
new file mode 100644
index 0000000..03523a3
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare ptr @bar(i64)
+declare i64 @baz()
+
+define ptr @foo(i1 %cond) {
+; CHECK-LABEL: foo(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.b8 %rs1, [foo_param_0];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: call.uni (retval0), baz, ();
+; CHECK-NEXT: ld.param.b64 %rd2, [retval0];
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: @%p1 bra $L__BB0_2;
+; CHECK-NEXT: // %bb.1: // %bb
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: call.uni (retval0), bar, (param0);
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: $L__BB0_2: // %common.ret
+; CHECK-NEXT: st.param.b64 [func_retval0], 0;
+; CHECK-NEXT: ret;
+entry:
+ %call = call i64 @baz()
+ br i1 %cond, label %common.ret, label %bb
+
+bb:
+ %tmp = call ptr @bar(i64 %call)
+ br label %common.ret
+
+common.ret:
+ ret ptr null
+}
diff --git a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
index 9f62477..af0942e 100644
--- a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
+++ b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
@@ -56,155 +56,153 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: .cfi_offset v29, -240
; CHECK-NEXT: .cfi_offset v30, -224
; CHECK-NEXT: .cfi_offset v31, -208
+; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 2, 728(1)
+; CHECK-NEXT: ld 14, 688(1)
+; CHECK-NEXT: ld 11, 704(1)
+; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 21, 5
+; CHECK-NEXT: lwa 5, 0(7)
+; CHECK-NEXT: ld 7, 720(1)
; CHECK-NEXT: std 22, 464(1) # 8-byte Folded Spill
; CHECK-NEXT: std 23, 472(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 22, 5
-; CHECK-NEXT: ld 5, 848(1)
+; CHECK-NEXT: mr 22, 6
+; CHECK-NEXT: ld 6, 848(1)
; CHECK-NEXT: addi 3, 3, 1
-; CHECK-NEXT: mr 11, 7
-; CHECK-NEXT: ld 23, 688(1)
-; CHECK-NEXT: ld 7, 728(1)
+; CHECK-NEXT: ld 15, 736(1)
; CHECK-NEXT: std 18, 432(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 440(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 18, 6
-; CHECK-NEXT: li 6, 9
; CHECK-NEXT: ld 19, 768(1)
-; CHECK-NEXT: ld 2, 760(1)
-; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
-; CHECK-NEXT: cmpldi 3, 9
-; CHECK-NEXT: ld 27, 816(1)
-; CHECK-NEXT: ld 26, 808(1)
-; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 15, 736(1)
-; CHECK-NEXT: lxv 39, 0(8)
+; CHECK-NEXT: ld 18, 760(1)
; CHECK-NEXT: std 30, 528(1) # 8-byte Folded Spill
; CHECK-NEXT: std 31, 536(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 30, 704(1)
-; CHECK-NEXT: lxv 38, 0(9)
-; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 21, 784(1)
+; CHECK-NEXT: ld 12, 696(1)
+; CHECK-NEXT: lxv 0, 0(9)
+; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 1, 0(8)
+; CHECK-NEXT: cmpldi 3, 9
+; CHECK-NEXT: ld 30, 824(1)
+; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 29, 840(1)
+; CHECK-NEXT: ld 28, 832(1)
+; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 23, 784(1)
; CHECK-NEXT: ld 20, 776(1)
; CHECK-NEXT: std 24, 480(1) # 8-byte Folded Spill
; CHECK-NEXT: std 25, 488(1) # 8-byte Folded Spill
-; CHECK-NEXT: iselgt 3, 3, 6
-; CHECK-NEXT: ld 6, 720(1)
+; CHECK-NEXT: ld 25, 800(1)
; CHECK-NEXT: ld 24, 792(1)
-; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 7, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 27, 816(1)
+; CHECK-NEXT: ld 26, 808(1)
+; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
+; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 17, 752(1)
+; CHECK-NEXT: extswsli 9, 5, 3
+; CHECK-NEXT: lxv 4, 0(14)
+; CHECK-NEXT: std 14, 32(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 12, 40(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 0, 5, 40
+; CHECK-NEXT: sldi 14, 5, 5
+; CHECK-NEXT: mulli 31, 5, 24
+; CHECK-NEXT: lxv 38, 0(2)
+; CHECK-NEXT: lxv 2, 0(11)
+; CHECK-NEXT: std 2, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 2, 5, 48
+; CHECK-NEXT: sldi 5, 5, 4
+; CHECK-NEXT: ld 16, 744(1)
+; CHECK-NEXT: lxv 5, 0(10)
+; CHECK-NEXT: std 6, 200(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 192(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 6, 712(1)
+; CHECK-NEXT: mr 10, 7
+; CHECK-NEXT: add 7, 14, 21
+; CHECK-NEXT: lxv 13, 0(19)
+; CHECK-NEXT: std 8, 48(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 6, 56(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 8, 11
+; CHECK-NEXT: li 11, 9
+; CHECK-NEXT: iselgt 3, 3, 11
; CHECK-NEXT: addi 3, 3, -2
-; CHECK-NEXT: lxv 6, 0(19)
-; CHECK-NEXT: lxv 11, 0(7)
-; CHECK-NEXT: std 5, 200(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 23, 40(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 6, 48(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 840(1)
-; CHECK-NEXT: lxv 12, 0(6)
-; CHECK-NEXT: rldicl 12, 3, 61, 3
+; CHECK-NEXT: rldicl 11, 3, 61, 3
+; CHECK-NEXT: lxv 3, 0(12)
+; CHECK-NEXT: lxv 40, 0(6)
+; CHECK-NEXT: std 18, 112(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 120(1) # 8-byte Folded Spill
+; CHECK-NEXT: add 19, 21, 5
+; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
+; CHECK-NEXT: lxv 39, 0(10)
+; CHECK-NEXT: addi 3, 7, 32
+; CHECK-NEXT: add 12, 31, 21
; CHECK-NEXT: std 20, 128(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 136(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 4, 0(21)
-; CHECK-NEXT: ld 25, 800(1)
-; CHECK-NEXT: lxv 33, 0(10)
-; CHECK-NEXT: lxv 32, 0(23)
-; CHECK-NEXT: lxv 36, 0(30)
-; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 17, 752(1)
-; CHECK-NEXT: ld 16, 744(1)
-; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 29, 712(1)
-; CHECK-NEXT: ld 28, 696(1)
-; CHECK-NEXT: std 8, 56(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 37, 0(28)
-; CHECK-NEXT: lxv 13, 0(29)
-; CHECK-NEXT: mr 8, 29
-; CHECK-NEXT: mr 9, 30
-; CHECK-NEXT: mr 10, 28
-; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 23, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 33, 0(15)
+; CHECK-NEXT: lxv 32, 0(16)
; CHECK-NEXT: std 26, 160(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 10, 0(15)
-; CHECK-NEXT: lxv 9, 0(16)
-; CHECK-NEXT: li 28, 1
-; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
-; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 8, 0(17)
-; CHECK-NEXT: lxv 7, 0(2)
+; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 37, 0(17)
+; CHECK-NEXT: lxv 36, 0(18)
+; CHECK-NEXT: std 30, 176(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 28, 184(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 12, 0(20)
+; CHECK-NEXT: lxv 11, 0(23)
+; CHECK-NEXT: add 20, 21, 9
; CHECK-NEXT: stfd 28, 560(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 29, 568(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 5, 0(20)
-; CHECK-NEXT: lxv 3, 0(24)
+; CHECK-NEXT: lxv 10, 0(24)
+; CHECK-NEXT: lxv 9, 0(25)
; CHECK-NEXT: stfd 30, 576(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 31, 584(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 2, 0(25)
-; CHECK-NEXT: lxv 1, 0(26)
+; CHECK-NEXT: lxv 8, 0(26)
+; CHECK-NEXT: lxv 7, 0(27)
+; CHECK-NEXT: addi 12, 12, 32
+; CHECK-NEXT: li 27, 0
+; CHECK-NEXT: mr 26, 21
; CHECK-NEXT: stxv 52, 208(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 53, 224(1) # 16-byte Folded Spill
-; CHECK-NEXT: lxv 0, 0(27)
+; CHECK-NEXT: lxv 6, 0(30)
+; CHECK-NEXT: lxv 41, 0(28)
+; CHECK-NEXT: addi 7, 11, 1
+; CHECK-NEXT: add 11, 0, 21
+; CHECK-NEXT: li 28, 1
; CHECK-NEXT: stxv 54, 240(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 55, 256(1) # 16-byte Folded Spill
+; CHECK-NEXT: lxv 43, 0(29)
+; CHECK-NEXT: lxv 42, 0(5)
; CHECK-NEXT: stxv 56, 272(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 57, 288(1) # 16-byte Folded Spill
+; CHECK-NEXT: addi 11, 11, 32
; CHECK-NEXT: stxv 58, 304(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 5, 192(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 832(1)
; CHECK-NEXT: stxv 59, 320(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 60, 336(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 61, 352(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 62, 368(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 63, 384(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
; CHECK-NEXT: std 16, 96(1) # 8-byte Folded Spill
; CHECK-NEXT: std 17, 104(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 2, 112(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 5, 184(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 824(1)
-; CHECK-NEXT: std 5, 176(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
-; CHECK-NEXT: lwa 5, 0(11)
-; CHECK-NEXT: li 27, 0
-; CHECK-NEXT: ld 7, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: mulli 6, 5, 40
-; CHECK-NEXT: sldi 0, 5, 4
-; CHECK-NEXT: extswsli 14, 5, 3
-; CHECK-NEXT: lxv 40, 0(7)
-; CHECK-NEXT: ld 7, 184(1) # 8-byte Folded Reload
-; CHECK-NEXT: add 31, 14, 22
-; CHECK-NEXT: add 11, 0, 22
-; CHECK-NEXT: mr 26, 22
-; CHECK-NEXT: addi 3, 11, 32
-; CHECK-NEXT: addi 11, 12, 1
-; CHECK-NEXT: mulli 12, 5, 48
-; CHECK-NEXT: addi 31, 31, 32
-; CHECK-NEXT: add 19, 22, 6
-; CHECK-NEXT: sldi 6, 5, 5
-; CHECK-NEXT: mulli 5, 5, 24
-; CHECK-NEXT: lxv 41, 0(7)
-; CHECK-NEXT: add 20, 22, 6
-; CHECK-NEXT: add 21, 22, 5
-; CHECK-NEXT: ld 5, 192(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 43, 0(5)
-; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 42, 0(5)
+; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_3: # %_loop_2_do_.lr.ph
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB0_4 Depth 2
-; CHECK-NEXT: maddld 5, 12, 27, 0
-; CHECK-NEXT: mr 6, 18
-; CHECK-NEXT: mr 29, 21
+; CHECK-NEXT: maddld 5, 2, 27, 0
+; CHECK-NEXT: mr 6, 22
; CHECK-NEXT: mr 30, 20
-; CHECK-NEXT: mr 2, 19
-; CHECK-NEXT: mtctr 11
-; CHECK-NEXT: add 25, 22, 5
-; CHECK-NEXT: maddld 5, 12, 27, 14
-; CHECK-NEXT: add 24, 22, 5
+; CHECK-NEXT: mr 29, 19
+; CHECK-NEXT: mtctr 7
+; CHECK-NEXT: add 25, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 14
+; CHECK-NEXT: add 24, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 31
+; CHECK-NEXT: add 23, 21, 5
; CHECK-NEXT: mr 5, 26
; CHECK-NEXT: .p2align 5
; CHECK-NEXT: .LBB0_4: # %_loop_2_do_
@@ -212,66 +210,66 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lxvp 34, 0(6)
; CHECK-NEXT: lxvp 44, 0(5)
-; CHECK-NEXT: xvmaddadp 39, 45, 35
-; CHECK-NEXT: lxvp 46, 0(24)
-; CHECK-NEXT: xvmaddadp 38, 47, 35
-; CHECK-NEXT: lxvp 48, 0(25)
-; CHECK-NEXT: lxvp 50, 0(29)
-; CHECK-NEXT: lxvp 62, 0(30)
-; CHECK-NEXT: lxvp 60, 0(2)
+; CHECK-NEXT: xvmaddadp 1, 45, 35
+; CHECK-NEXT: lxvp 46, 0(30)
+; CHECK-NEXT: xvmaddadp 0, 47, 35
+; CHECK-NEXT: lxvp 48, 0(29)
+; CHECK-NEXT: lxvp 50, 0(23)
+; CHECK-NEXT: lxvp 62, 0(24)
+; CHECK-NEXT: lxvp 60, 0(25)
; CHECK-NEXT: lxvp 58, 32(6)
; CHECK-NEXT: lxvp 56, 32(5)
-; CHECK-NEXT: lxvp 54, 32(24)
-; CHECK-NEXT: lxvp 52, 32(25)
-; CHECK-NEXT: lxvp 30, 32(29)
-; CHECK-NEXT: lxvp 28, 32(30)
-; CHECK-NEXT: lxvp 26, 32(2)
-; CHECK-NEXT: xvmaddadp 33, 49, 35
-; CHECK-NEXT: xvmaddadp 32, 51, 35
-; CHECK-NEXT: xvmaddadp 37, 63, 35
-; CHECK-NEXT: xvmaddadp 36, 61, 35
-; CHECK-NEXT: xvmaddadp 13, 44, 34
-; CHECK-NEXT: xvmaddadp 12, 46, 34
-; CHECK-NEXT: xvmaddadp 11, 48, 34
-; CHECK-NEXT: xvmaddadp 10, 50, 34
-; CHECK-NEXT: xvmaddadp 9, 62, 34
-; CHECK-NEXT: xvmaddadp 8, 60, 34
-; CHECK-NEXT: xvmaddadp 7, 57, 59
-; CHECK-NEXT: xvmaddadp 6, 55, 59
-; CHECK-NEXT: xvmaddadp 5, 53, 59
-; CHECK-NEXT: xvmaddadp 4, 31, 59
-; CHECK-NEXT: xvmaddadp 3, 29, 59
-; CHECK-NEXT: xvmaddadp 2, 27, 59
-; CHECK-NEXT: xvmaddadp 1, 56, 58
-; CHECK-NEXT: xvmaddadp 0, 54, 58
-; CHECK-NEXT: xvmaddadp 40, 52, 58
+; CHECK-NEXT: lxvp 54, 32(30)
+; CHECK-NEXT: lxvp 52, 32(29)
+; CHECK-NEXT: lxvp 30, 32(23)
+; CHECK-NEXT: lxvp 28, 32(24)
+; CHECK-NEXT: lxvp 26, 32(25)
+; CHECK-NEXT: xvmaddadp 5, 49, 35
+; CHECK-NEXT: xvmaddadp 4, 51, 35
+; CHECK-NEXT: xvmaddadp 3, 63, 35
+; CHECK-NEXT: xvmaddadp 2, 61, 35
+; CHECK-NEXT: xvmaddadp 40, 44, 34
+; CHECK-NEXT: xvmaddadp 39, 46, 34
+; CHECK-NEXT: xvmaddadp 38, 48, 34
+; CHECK-NEXT: xvmaddadp 33, 50, 34
+; CHECK-NEXT: xvmaddadp 32, 62, 34
+; CHECK-NEXT: xvmaddadp 37, 60, 34
+; CHECK-NEXT: xvmaddadp 36, 57, 59
+; CHECK-NEXT: xvmaddadp 13, 55, 59
+; CHECK-NEXT: xvmaddadp 12, 53, 59
+; CHECK-NEXT: xvmaddadp 11, 31, 59
+; CHECK-NEXT: xvmaddadp 10, 29, 59
+; CHECK-NEXT: xvmaddadp 9, 27, 59
+; CHECK-NEXT: xvmaddadp 8, 56, 58
+; CHECK-NEXT: xvmaddadp 7, 54, 58
+; CHECK-NEXT: xvmaddadp 6, 52, 58
; CHECK-NEXT: xvmaddadp 41, 30, 58
; CHECK-NEXT: xvmaddadp 43, 28, 58
; CHECK-NEXT: xvmaddadp 42, 26, 58
; CHECK-NEXT: addi 6, 6, 64
; CHECK-NEXT: addi 5, 5, 64
+; CHECK-NEXT: addi 30, 30, 64
+; CHECK-NEXT: addi 29, 29, 64
+; CHECK-NEXT: addi 23, 23, 64
; CHECK-NEXT: addi 24, 24, 64
; CHECK-NEXT: addi 25, 25, 64
-; CHECK-NEXT: addi 29, 29, 64
-; CHECK-NEXT: addi 30, 30, 64
-; CHECK-NEXT: addi 2, 2, 64
; CHECK-NEXT: bdnz .LBB0_4
; CHECK-NEXT: # %bb.5: # %_loop_2_endl_
; CHECK-NEXT: #
; CHECK-NEXT: addi 28, 28, 6
-; CHECK-NEXT: add 26, 26, 12
-; CHECK-NEXT: add 31, 31, 12
-; CHECK-NEXT: add 19, 19, 12
-; CHECK-NEXT: add 3, 3, 12
-; CHECK-NEXT: add 20, 20, 12
-; CHECK-NEXT: add 21, 21, 12
+; CHECK-NEXT: add 26, 26, 2
+; CHECK-NEXT: add 20, 20, 2
+; CHECK-NEXT: add 11, 11, 2
+; CHECK-NEXT: add 19, 19, 2
+; CHECK-NEXT: add 3, 3, 2
+; CHECK-NEXT: add 12, 12, 2
; CHECK-NEXT: addi 27, 27, 1
; CHECK-NEXT: cmpld 28, 4
; CHECK-NEXT: ble 0, .LBB0_3
; CHECK-NEXT: # %bb.6: # %_loop_1_loopHeader_._return_bb_crit_edge.loopexit
-; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 63, 384(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 39, 0(3)
+; CHECK-NEXT: stxv 1, 0(3)
; CHECK-NEXT: ld 3, 64(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 62, 368(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 61, 352(1) # 16-byte Folded Reload
@@ -284,7 +282,7 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: lxv 54, 240(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 53, 224(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 52, 208(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 38, 0(3)
+; CHECK-NEXT: stxv 0, 0(3)
; CHECK-NEXT: ld 3, 72(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 31, 584(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 30, 576(1) # 8-byte Folded Reload
@@ -297,8 +295,8 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 29, 520(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 28, 512(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 27, 504(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 33, 0(3)
-; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: ld 3, 32(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 26, 496(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 25, 488(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 24, 480(1) # 8-byte Folded Reload
@@ -310,40 +308,41 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 18, 432(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 17, 424(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 16, 416(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 32, 0(3)
-; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 37, 0(10)
-; CHECK-NEXT: stxv 36, 0(9)
-; CHECK-NEXT: stxv 13, 0(8)
+; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 15, 408(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 14, 400(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 12, 0(3)
+; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 2, 0(8)
+; CHECK-NEXT: stxv 40, 0(3)
; CHECK-NEXT: ld 3, 80(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 11, 0(3)
+; CHECK-NEXT: stxv 39, 0(10)
+; CHECK-NEXT: stxv 38, 0(3)
; CHECK-NEXT: ld 3, 88(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 10, 0(3)
+; CHECK-NEXT: stxv 33, 0(3)
; CHECK-NEXT: ld 3, 96(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 9, 0(3)
+; CHECK-NEXT: stxv 32, 0(3)
; CHECK-NEXT: ld 3, 104(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 8, 0(3)
+; CHECK-NEXT: stxv 37, 0(3)
; CHECK-NEXT: ld 3, 112(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 7, 0(3)
+; CHECK-NEXT: stxv 36, 0(3)
; CHECK-NEXT: ld 3, 120(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 6, 0(3)
+; CHECK-NEXT: stxv 13, 0(3)
; CHECK-NEXT: ld 3, 128(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: stxv 12, 0(3)
; CHECK-NEXT: ld 3, 136(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: stxv 11, 0(3)
; CHECK-NEXT: ld 3, 144(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: stxv 10, 0(3)
; CHECK-NEXT: ld 3, 152(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 2, 0(3)
+; CHECK-NEXT: stxv 9, 0(3)
; CHECK-NEXT: ld 3, 160(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 1, 0(3)
+; CHECK-NEXT: stxv 8, 0(3)
; CHECK-NEXT: ld 3, 168(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 0, 0(3)
+; CHECK-NEXT: stxv 7, 0(3)
; CHECK-NEXT: ld 3, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 40, 0(3)
+; CHECK-NEXT: stxv 6, 0(3)
; CHECK-NEXT: ld 3, 184(1) # 8-byte Folded Reload
; CHECK-NEXT: stxv 41, 0(3)
; CHECK-NEXT: ld 3, 192(1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
index 799ba63..8fb4c21 100644
--- a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
+++ b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
@@ -40,9 +40,10 @@ define signext i32 @test(ptr noalias %PtrA, ptr noalias %PtrB, i32 signext %LenA
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_4: # %if.end9
; CHECK-NEXT: #
-; CHECK-NEXT: lwzx 10, 6, 9
+; CHECK-NEXT: add 9, 3, 9
+; CHECK-NEXT: lwz 10, 4(9)
; CHECK-NEXT: addi 10, 10, 1
-; CHECK-NEXT: stwx 10, 6, 9
+; CHECK-NEXT: stw 10, 4(9)
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_5: # %if.then
; CHECK-NEXT: lwax 3, 9, 3
diff --git a/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..34493ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,33 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`.
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..6e1fe92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..1f91f41
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
@@ -0,0 +1,21 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 1263892..4091524 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -684,13 +684,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -698,25 +698,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -724,8 +724,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -733,7 +733,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -741,12 +741,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -754,8 +754,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -767,7 +767,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -779,13 +779,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -793,8 +793,8 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 12(a0)
; RV32-NEXT: sb zero, 13(a0)
@@ -814,7 +814,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 12(a0)
; RV64-NEXT: sb zero, 13(a0)
@@ -834,7 +834,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
@@ -842,7 +842,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -851,8 +851,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 28(a0)
; RV32-NEXT: sb zero, 29(a0)
@@ -888,7 +888,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 28(a0)
; RV64-NEXT: sb zero, 29(a0)
@@ -924,7 +924,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 16(a0)
; RV32-FAST-NEXT: sw zero, 20(a0)
@@ -936,7 +936,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -947,8 +947,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 60(a0)
; RV32-NEXT: sb zero, 61(a0)
@@ -1016,7 +1016,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 60(a0)
; RV64-NEXT: sb zero, 61(a0)
@@ -1084,7 +1084,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 48(a0)
; RV32-FAST-NEXT: sw zero, 52(a0)
@@ -1104,7 +1104,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 32(a0)
; RV64-FAST-NEXT: sd zero, 40(a0)
@@ -1121,13 +1121,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1135,13 +1135,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1149,14 +1149,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1165,8 +1165,8 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
@@ -1174,7 +1174,7 @@ define void @aligned_bzero_16(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1183,8 +1183,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 16(a0)
; RV32-BOTH-NEXT: sw zero, 20(a0)
@@ -1196,7 +1196,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1207,8 +1207,8 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 48(a0)
; RV32-BOTH-NEXT: sw zero, 52(a0)
@@ -1228,7 +1228,7 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 32(a0)
; RV64-BOTH-NEXT: sd zero, 40(a0)
@@ -1247,28 +1247,28 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
; Usual overlap tricks
-define void @aligned_bzero_7(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_7:
+define void @aligned_memset_zero_7(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_7:
; RV32: # %bb.0:
; RV32-NEXT: sw zero, 0(a0)
; RV32-NEXT: sh zero, 4(a0)
; RV32-NEXT: sb zero, 6(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_7:
+; RV64-LABEL: aligned_memset_zero_7:
; RV64: # %bb.0:
; RV64-NEXT: sw zero, 0(a0)
; RV64-NEXT: sh zero, 4(a0)
; RV64-NEXT: sb zero, 6(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_7:
+; RV32-FAST-LABEL: aligned_memset_zero_7:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 3(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_7:
+; RV64-FAST-LABEL: aligned_memset_zero_7:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 3(a0)
; RV64-FAST-NEXT: sw zero, 0(a0)
@@ -1277,8 +1277,8 @@ define void @aligned_bzero_7(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_15(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_15:
+define void @aligned_memset_zero_15(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_15:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 14(a0)
; RV32-NEXT: sw zero, 0(a0)
@@ -1287,7 +1287,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-NEXT: sh zero, 12(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_15:
+; RV64-LABEL: aligned_memset_zero_15:
; RV64: # %bb.0:
; RV64-NEXT: sd zero, 0(a0)
; RV64-NEXT: sw zero, 8(a0)
@@ -1295,7 +1295,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV64-NEXT: sb zero, 14(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_15:
+; RV32-FAST-LABEL: aligned_memset_zero_15:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 11(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
@@ -1303,7 +1303,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 8(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_15:
+; RV64-FAST-LABEL: aligned_memset_zero_15:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 7(a0)
; RV64-FAST-NEXT: sd zero, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 5747bbb..bd37443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -554,9 +554,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT: vmv.v.i v10, 0
; VLA-NEXT: vmv1r.v v0, v8
-; VLA-NEXT: vmerge.vim v8, v10, 1, v0
; VLA-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vmerge.vim v9, v10, 1, v0
; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT: vmsne.vi v0, v9, 0
; VLA-NEXT: ret
@@ -568,9 +567,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLS-NEXT: vmv.v.i v9, 0
; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vmv1r.v v0, v8
-; VLS-NEXT: vmerge.vim v8, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; VLS-NEXT: vmsne.vi v0, v10, 0
; VLS-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 8963940..2c11bd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -360,13 +360,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -374,25 +374,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -400,8 +400,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -409,7 +409,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -417,12 +417,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -430,8 +430,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -443,7 +443,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -455,13 +455,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -469,29 +469,29 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -501,8 +501,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
@@ -511,7 +511,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
@@ -520,7 +520,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
@@ -529,7 +529,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -541,8 +541,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: li a1, 64
; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -550,7 +550,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: li a1, 64
; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -558,14 +558,14 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -577,13 +577,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -591,13 +591,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -605,14 +605,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -621,15 +621,15 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -639,8 +639,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -649,7 +649,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -661,15 +661,15 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -679,8 +679,8 @@ define void @aligned_bzero_64(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_66(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_66:
+define void @aligned_memset_zero_66(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_66:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 64(a0)
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -688,7 +688,7 @@ define void @aligned_bzero_66(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_66:
+; RV64-BOTH-LABEL: aligned_memset_zero_66:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 64(a0)
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -699,8 +699,8 @@ define void @aligned_bzero_66(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_96(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_96:
+define void @aligned_memset_zero_96(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_96:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -713,7 +713,7 @@ define void @aligned_bzero_96(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_96:
+; RV64-BOTH-LABEL: aligned_memset_zero_96:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -729,15 +729,15 @@ define void @aligned_bzero_96(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_128(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_128:
+define void @aligned_memset_zero_128(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_128:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_128:
+; RV64-BOTH-LABEL: aligned_memset_zero_128:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -747,8 +747,8 @@ define void @aligned_bzero_128(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_256(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_256:
+define void @aligned_memset_zero_256(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_256:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -757,7 +757,7 @@ define void @aligned_bzero_256(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_256:
+; RV64-BOTH-LABEL: aligned_memset_zero_256:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 3dc83d5..38d38f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -1636,3 +1636,49 @@ define <8 x half> @vector_interleave8_v8f16_v1f16(<1 x half> %a, <1 x half> %b,
%res = call <8 x half> @llvm.vector.interleave8.v8f16(<1 x half> %a, <1 x half> %b, <1 x half> %c, <1 x half> %d, <1 x half> %e, <1 x half> %f, <1 x half> %g, <1 x half> %h)
ret <8 x half> %res
}
+
+define <8 x i16> @interleave4_const_splat_v8i16(<2 x i16> %a) {
+; CHECK-LABEL: interleave4_const_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_const_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_const_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.i v8, 3
+; ZIP-NEXT: ret
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <8 x i16> %retval
+}
+
+define <8 x i16> @interleave4_same_nonconst_splat_v8i16(i16 %a) {
+; CHECK-LABEL: interleave4_same_nonconst_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.x v8, a0
+; ZIP-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> zeroinitializer
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat)
+ ret <8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 01cc5c5..ee38257 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -14947,3 +14947,147 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv2f64(<vscale x 2 x
%res = call <vscale x 16 x double> @llvm.vector.interleave8.nxv16f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6, <vscale x 2 x double> %v7)
ret <vscale x 16 x double> %res
}
+
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; V-LABEL: interleave2_diff_const_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.i v9, 3
+; V-NEXT: li a0, 4
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: vwaddu.vx v8, v9, a0
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 4
+; ZVBB-NEXT: li a0, 3
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a0
+; ZVBB-NEXT: vslideup.vx v8, v9, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.i v9, 4
+; ZIP-NEXT: vmv.v.i v10, 3
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v10, v9
+; ZIP-NEXT: ri.vzip2a.vv v8, v10, v9
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; V-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.x v9, a0
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vwaddu.vx v8, v9, a1
+; V-NEXT: vwmaccu.vx v8, a1, v10
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a1
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a1
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.x v9, a0
+; ZIP-NEXT: vmv.v.x v10, a1
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v9, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v9, v10
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_const_splat_nxv8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 1e2e779..2f2035b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -222,3 +222,14 @@ define <vscale x 1 x i64> @vleff_move_past_passthru(ptr %p, ptr %q, iXLen %avl)
%b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %vec, iXLen %avl)
ret <vscale x 1 x i64> %b
}
+
+define <vscale x 1 x i64> @vmerge(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl) {
+; CHECK-LABEL: vmerge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %a, iXLen %avl)
+ ret <vscale x 1 x i64> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
index 6e106e5..9c3e96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
@@ -152,3 +152,19 @@ body: |
%y:gpr = ADDI $x0, 1
%z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
...
+---
+name: vmerge_vvm
+body: |
+ bb.0:
+ liveins: $v8, $v0
+ ; CHECK-LABEL: name: vmerge_vvm
+ ; CHECK: liveins: $v8, $v0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %passthru:vrnov0 = COPY $v8
+ ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+ ; CHECK-NEXT: %x:vrnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %passthru:vr = COPY $v8
+ %mask:vmv0 = COPY $v0
+ %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
+...
diff --git a/llvm/test/CodeGen/RISCV/zilsd.ll b/llvm/test/CodeGen/RISCV/zilsd.ll
index 09b065a..048ce96 100644
--- a/llvm/test/CodeGen/RISCV/zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/zilsd.ll
@@ -117,3 +117,22 @@ entyr:
store i64 0, ptr @g
ret void
}
+
+define void @large_offset(ptr nocapture %p, i64 %d) nounwind {
+; CHECK-LABEL: large_offset:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 4
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: ld a2, -384(a0)
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: seqz a1, a2
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: sd a2, -384(a0)
+; CHECK-NEXT: ret
+entry:
+ %add.ptr = getelementptr inbounds i64, ptr %p, i64 2000
+ %a = load i64, ptr %add.ptr, align 8
+ %b = add i64 %a, 1
+ store i64 %b, ptr %add.ptr, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
new file mode 100644
index 0000000..b2333e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
@@ -0,0 +1,42 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+@.str = private unnamed_addr constant [4 x i8] c"In3\00", align 1
+@.str.2 = private unnamed_addr constant [5 x i8] c"Out4\00", align 1
+@.str.3 = private unnamed_addr constant [5 x i8] c"Out3\00", align 1
+
+
+; CHECK-DAG: %[[#INT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#INT4:]] = OpTypeVector %[[#INT32]] 4
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FLOAT4:]] = OpTypeVector %[[#FLOAT]] 4
+; CHECK-DAG: %[[#INT3:]] = OpTypeVector %[[#INT32]] 3
+; CHECK-DAG: %[[#UNDEF_INT4:]] = OpUndef %[[#INT4]]
+
+define void @case1() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4i32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) %2, i32 0)
+ store <4 x i32> %4, ptr addrspace(11) %5, align 16
+ ret void
+}
+
+define void @case2() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ ; CHECK: %[[#VEC_TRUNCATE:]] = OpVectorShuffle %[[#INT3]] %[[#VEC_SHUFFLE]] %[[#UNDEF_INT4]] 0 1 2
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.3)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = shufflevector <4 x i32> %4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+ %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3i32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) %2, i32 0)
+ store <3 x i32> %5, ptr addrspace(11) %6, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
index 085f8b3..9d07b63 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -33,7 +33,7 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
ret void
}
@@ -55,7 +55,7 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
ret void
}
diff --git a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
index a1ff1e0..66337b1 100644
--- a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
+++ b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -print-after-all | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
new file mode 100644
index 0000000..26dc60e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
@@ -0,0 +1,46 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main() {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+; CHECK-DAG: %[[#ulong_3:]] = OpConstant %[[#ulong]] 3
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#tmp:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#ptr:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#tmp]] %[[#ulong_1]] %[[#ulong_3]]
+; The rewritten GEP combined all constant indices into a single byte offset.
+; Make sure the correct per-level indices are recovered.
+ %arrayidx.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 64
+
+; CHECK: OpLoad %[[#v4f]] %[[#ptr]]
+ %4 = load <4 x float>, ptr addrspace(11) %arrayidx.i, align 1
+
+ ret <4 x float> %4
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
new file mode 100644
index 0000000..a6efb38
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main(i32 %index) {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+; CHECK-DAG: %[[#uint_16:]] = OpConstant %[[#uint]] 16
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_arr_v4f:]] = OpTypePointer StorageBuffer %[[#arr_v4f]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#a:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#b:]] = OpInBoundsAccessChain %[[#sb_arr_v4f]] %[[#a]] %[[#ulong_1]]
+ %4 = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+
+; CHECK: %[[#offset:]] = OpIMul %[[#]] %[[#]] %[[#uint_16]]
+; The offset is computed in bytes. Make sure it is converted back to an element index.
+ %offset = mul i32 %index, 16
+
+; CHECK: %[[#index:]] = OpUDiv %[[#]] %[[#offset]] %[[#uint_16]]
+; CHECK: %[[#c:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#b]] %[[#index]]
+ %5 = getelementptr inbounds nuw i8, ptr addrspace(11) %4, i32 %offset
+
+; CHECK: OpLoad %[[#v4f]] %[[#c]]
+ %6 = load <4 x float>, ptr addrspace(11) %5, align 1
+
+ ret <4 x float> %6
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
new file mode 100644
index 0000000..8e6b5a6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+; struct S1 {
+; int4 i;
+; float4 f;
+; };
+; struct S2 {
+; float4 f;
+; int4 i;
+; };
+;
+; StructuredBuffer<S1> In : register(t1);
+; RWStructuredBuffer<S2> Out : register(u0);
+;
+; [numthreads(1,1,1)]
+; void main(uint GI : SV_GroupIndex) {
+; Out[GI].f = In[GI].f;
+; Out[GI].i = In[GI].i;
+; }
+
+%struct.S1 = type { <4 x i32>, <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"Out\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+; CHECK-LABEL: main
+; CHECK: %43 = OpFunction %2 None %3 ; -- Begin function main
+; CHECK-NEXT: %1 = OpLabel
+; CHECK-NEXT: %44 = OpVariable %28 Function %38
+; CHECK-NEXT: %45 = OpVariable %27 Function %39
+; CHECK-NEXT: %46 = OpCopyObject %19 %40
+; CHECK-NEXT: %47 = OpCopyObject %16 %41
+; CHECK-NEXT: %48 = OpLoad %4 %42
+; CHECK-NEXT: %49 = OpAccessChain %13 %46 %29 %48
+; CHECK-NEXT: %50 = OpInBoundsAccessChain %9 %49 %31
+; CHECK-NEXT: %51 = OpLoad %8 %50 Aligned 1
+; CHECK-NEXT: %52 = OpAccessChain %11 %47 %29 %48
+; CHECK-NEXT: %53 = OpInBoundsAccessChain %9 %52 %29
+; CHECK-NEXT: OpStore %53 %51 Aligned 1
+; CHECK-NEXT: %54 = OpAccessChain %6 %49 %29
+; CHECK-NEXT: %55 = OpLoad %5 %54 Aligned 1
+; CHECK-NEXT: %56 = OpInBoundsAccessChain %6 %52 %31
+; CHECK-NEXT: OpStore %56 %55 Aligned 1
+; CHECK-NEXT: OpReturn
+; CHECK-NEXT: OpFunctionEnd
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %1 = tail call target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %2 = tail call i32 @llvm.spv.flattened.thread.id.in.group()
+ %3 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 %2)
+ %f.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+ %4 = load <4 x float>, ptr addrspace(11) %f.i, align 1
+ %5 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) %1, i32 %2)
+ store <4 x float> %4, ptr addrspace(11) %5, align 1
+ %6 = load <4 x i32>, ptr addrspace(11) %3, align 1
+ %i6.i = getelementptr inbounds nuw i8, ptr addrspace(11) %5, i64 16
+ store <4 x i32> %6, ptr addrspace(11) %i6.i, align 1
+ ret void
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32, i32, i32, i32, i1, ptr)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1), i32)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
new file mode 100644
index 0000000..8b5efe7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
@@ -0,0 +1,104 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Float16
+; CHECK-DAG: OpCapability ImageBasic
+; CHECK-DAG: OpCapability ImageReadWrite
+; CHECK-DAG: OpCapability Pipes
+; CHECK-DAG: OpCapability DeviceEnqueue
+
+; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
+; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#HALF:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#PIPE_RD:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#PIPE_WR:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#IMG1D_RD:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown ReadOnly
+; CHECK-DAG: %[[#IMG2D_RD:]] = OpTypeImage %[[#INT]] 2D 0 0 0 0
+; CHECK-DAG: %[[#IMG3D_RD:]] = OpTypeImage %[[#INT]] 3D 0 0 0 0
+; CHECK-DAG: %[[#IMG2DA_RD:]] = OpTypeImage %[[#HALF]] 2D 0 1 0 0
+; CHECK-DAG: %[[#IMG2DD_RD:]] = OpTypeImage %[[#FLOAT]] Buffer 0 0 0
+; CHECK-DAG: %[[#IMG1D_WR:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown WriteOnly
+; CHECK-DAG: %[[#IMG2D_RW:]] = OpTypeImage %[[#VOID]] 2D 0 0 0 0 Unknown ReadWrite
+; CHECK-DAG: %[[#IMG1DB_RD:]] = OpTypeImage %[[#FLOAT]] 2D 1 0 0 0
+
+; CHECK-DAG: %[[#DEVEVENT:]] = OpTypeDeviceEvent
+; CHECK-DAG: %[[#EVENT:]] = OpTypeEvent
+; CHECK-DAG: %[[#QUEUE:]] = OpTypeQueue
+; CHECK-DAG: %[[#RESID:]] = OpTypeReserveId
+; CHECK-DAG: %[[#SAMP:]] = OpTypeSampler
+; CHECK-DAG: %[[#SAMPIMG:]] = OpTypeSampledImage %[[#IMG1DB_RD]]
+
+; CHECK-DAG: %[[#]] = OpFunction %[[#VOID]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG3D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DA_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DD_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RW]]
+
+define spir_kernel void @foo(
+ target("spirv.Pipe", 0) %a,
+ target("spirv.Pipe", 1) %b,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %c1,
+ target("spirv.Image", i32, 1, 0, 0, 0, 0, 0, 0) %d1,
+ target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) %e1,
+ target("spirv.Image", half, 1, 0, 1, 0, 0, 0, 0) %f1,
+ target("spirv.Image", float, 5, 0, 0, 0, 0, 0, 0) %g1,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) %c2,
+ target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 2) %d3) #0 !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+entry:
+ ret void
+}
+
+; CHECK-DAG: %[[#]] = OpFunction
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#DEVEVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#EVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#QUEUE]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#RESID]]
+
+; CHECK-DAG: %[[#IMARG:]] = OpFunctionParameter %[[#IMG1DB_RD]]
+; CHECK-DAG: %[[#SAMARG:]] = OpFunctionParameter %[[#SAMP]]
+; CHECK-DAG: %[[#SAMPIMVAR:]] = OpSampledImage %[[#SAMPIMG]] %[[#IMARG]] %[[#SAMARG]]
+; CHECK-DAG: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SAMPIMVAR]]
+
+define spir_func void @bar(
+ target("spirv.DeviceEvent") %a,
+ target("spirv.Event") %b,
+ target("spirv.Queue") %c,
+ target("spirv.ReserveId") %d) {
+ ret void
+}
+
+define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce,
+ target("spirv.Sampler") %s.coerce) {
+ %1 = tail call spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce, target("spirv.Sampler") %s.coerce) #1
+ %2 = tail call spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) %1, <4 x i32> zeroinitializer, i32 2, float 1.000000e+00) #1
+ ret void
+}
+
+declare spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler"))
+
+declare spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0), <4 x i32>, i32, float)
+
+attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!opencl.enable.FP_CONTRACT = !{}
+!opencl.spir.version = !{!6}
+!opencl.ocl.version = !{!7}
+!opencl.used.extensions = !{!8}
+!opencl.used.optional.core.features = !{!9}
+!opencl.compiler.options = !{!8}
+
+!1 = !{i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1}
+!2 = !{!"read_only", !"write_only", !"read_only", !"read_only", !"read_only", !"read_only", !"read_only", !"write_only", !"read_write"}
+!3 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!4 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!5 = !{!"pipe", !"pipe", !"", !"", !"", !"", !"", !"", !""}
+!6 = !{i32 1, i32 2}
+!7 = !{i32 2, i32 0}
+!8 = !{!"cl_khr_fp16"}
+!9 = !{!"cl_images"}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
new file mode 100644
index 0000000..63b2604
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
@@ -0,0 +1,19 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Fun:]] "_Z3booi"
+; CHECK-DAG: OpDecorate %[[#Param:]] FuncParamAttr Sret
+; CHECK-DAG: %[[#PtrTy:]] = OpTypePointer Function %[[#StructTy:]]
+; CHECK-DAG: %[[#StructTy]] = OpTypeStruct
+; CHECK: %[[#Fun]] = OpFunction %[[#]]
+; CHECK: %[[#Param]] = OpFunctionParameter %[[#PtrTy]]
+
+%struct.Example = type { }
+
+define spir_func i32 @foo() {
+ %1 = alloca %struct.Example, align 8
+ call void @_Z3booi(ptr sret(%struct.Example) align 8 %1, i32 noundef 42)
+ ret i32 0
+}
+
+declare void @_Z3booi(ptr sret(%struct.Example) align 8, i32 noundef)
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
index 73c7a8d..5835616 100644
--- a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
@@ -3,6 +3,23 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; Test a v16i8 -> v8i16 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f1_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhb %v1, %v26
+; CHECK-NEXT: vmlhw %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext1 = zext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext2 = zext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %zext1, %zext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 unsigned widening multiplication.
define <8 x i16> @f1(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f1:
@@ -31,6 +48,23 @@ define <8 x i16> @f2(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v16i8 -> v8i16 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f3_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphb %v0, %v26
+; CHECK-NEXT: vuphb %v1, %v24
+; CHECK-NEXT: vmlhw %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext1 = sext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext2 = sext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %sext1, %sext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 signed widening multiplication.
define <8 x i16> @f3(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f3:
@@ -59,6 +93,23 @@ define <8 x i16> @f4(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v8i16 -> v4i32 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f5_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhh %v0, %v24
+; CHECK-NEXT: vuplhh %v1, %v26
+; CHECK-NEXT: vmlf %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext1 = zext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext2 = zext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %zext1, %zext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 unsigned widening multiplication.
define <4 x i32> @f5(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f5:
@@ -87,6 +138,23 @@ define <4 x i32> @f6(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v8i16 -> v4i32 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f7_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphh %v0, %v26
+; CHECK-NEXT: vuphh %v1, %v24
+; CHECK-NEXT: vmlf %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext1 = sext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext2 = sext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %sext1, %sext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 signed widening multiplication.
define <4 x i32> @f7(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f7:
@@ -115,6 +183,29 @@ define <4 x i32> @f8(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v4i32 -> v2i64 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f9_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhf %v0, %v24
+; CHECK-NEXT: vuplhf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext1 = zext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext2 = zext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %zext1, %zext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 unsigned widening multiplication.
define <2 x i64> @f9(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f9:
@@ -143,6 +234,29 @@ define <2 x i64> @f10(<4 x i32> %val1, <4 x i32> %val2) {
ret <2 x i64> %ret
}
+; Test a v4i32 -> v2i64 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f11_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphf %v0, %v24
+; CHECK-NEXT: vuphf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext1 = sext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext2 = sext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %sext1, %sext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 signed widening multiplication.
define <2 x i64> @f11(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f11:
diff --git a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
index 60cfc27..4a4973b 100644
--- a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
+++ b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
@@ -68,9 +68,9 @@ target triple = "wasm32-unknown-unknown"
; bleeding-edge: +atomics, +bulk-memory, +bulk-memory-opt,
; +call-indirect-overlong, +exception-handling,
-; +extended-const, +fp16, +multimemory, +multivalue,
+; +extended-const, +fp16, +gc, +multimemory, +multivalue,
; +mutable-globals, +nontrapping-fptoint, +relaxed-simd,
-; +reference-types, +simd128, +sign-ext, +tail-call, +gc
+; +reference-types, +simd128, +sign-ext, +tail-call
; BLEEDING-EDGE-LABEL: .section .custom_section.target_features,"",@
; BLEEDING-EDGE-NEXT: .int8 17
; BLEEDING-EDGE-NEXT: .int8 43
diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll
index b111ae5..e52ce6c 100644
--- a/llvm/test/CodeGen/X86/apx/cf.ll
+++ b/llvm/test/CodeGen/X86/apx/cf.ll
@@ -194,3 +194,38 @@ entry:
call void @llvm.masked.store.v1i64.p0(<1 x i64> %3, ptr %p, i32 4, <1 x i1> %0)
ret void
}
+
+define void @sink_gep(ptr %p, i1 %cond) {
+; CHECK-LABEL: sink_gep:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovnel %eax, 112(%rdi)
+; CHECK-NEXT: cfcmovnel 112(%rdi), %eax
+; CHECK-NEXT: movl %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = getelementptr i8, ptr %p, i64 112
+ br label %next
+
+next:
+ %1 = bitcast i1 %cond to <1 x i1>
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %0, i32 1, <1 x i1> %1)
+ %2 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %0, i32 1, <1 x i1> %1, <1 x i32> zeroinitializer)
+ store <1 x i32> %2, ptr %p, align 4
+ ret void
+}
+
+define void @xor_cond(ptr %p, i1 %cond) {
+; CHECK-LABEL: xor_cond:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovel %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = xor i1 %cond, true
+ %1 = insertelement <1 x i1> zeroinitializer, i1 %0, i64 0
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %p, i32 1, <1 x i1> %1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
new file mode 100644
index 0000000..1136287
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -0,0 +1,43 @@
+;; Test if temporary labels are generated for each indirect callsite with callee_type metadata.
+;; Test if the .callgraph section contains the MD5 hash of callee type ids generated from
+;; generalized type id strings.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -o - < %s | FileCheck %s
+
+; CHECK: ball:
+; CHECK-NEXT: [[LABEL_FUNC:\.Lfunc_begin[0-9]+]]:
+define ptr @ball() {
+entry:
+ %fp_foo_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP0:\.L.*]]:
+ call void (...) %fp_foo_val(), !callee_type !0
+ %fp_bar_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP1:\.L.*]]:
+ %call_fp_bar = call i32 %fp_bar_val(i8 0), !callee_type !2
+ %fp_baz_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP2:\.L.*]]:
+ %call_fp_baz = call ptr %fp_baz_val(ptr null), !callee_type !4
+ ret ptr %call_fp_baz
+}
+
+; CHECK: .section .callgraph,"o",@progbits,.text
+
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad [[LABEL_FUNC]]
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad 3
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvE.generalized"}
+;; Test for MD5 hash of _ZTSFvE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 4524972987496481828
+; CHECK-NEXT: .quad [[LABEL_TMP0]]
+!2 = !{!3}
+!3 = !{i64 0, !"_ZTSFicE.generalized"}
+;; Test for MD5 hash of _ZTSFicE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 3498816979441845844
+; CHECK-NEXT: .quad [[LABEL_TMP1]]
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
+;; Test for MD5 hash of _ZTSFPvS_E.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 8646233951371320954
+; CHECK-NEXT: .quad [[LABEL_TMP2]]
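The .quad values checked above are the 64-bit callee type ids that the test comments describe as MD5 hashes of the generalized type id strings. As an illustrative sketch only (assuming the id is the low 64 bits of the MD5 digest read little-endian, in the style of llvm::MD5Hash; the patch itself does not spell out the exact hashing), they could be reproduced outside of llc like so:

import hashlib
import struct

def callee_type_id(type_id_string: str) -> int:
    # Assumed scheme: low 64 bits of the MD5 digest, read little-endian.
    digest = hashlib.md5(type_id_string.encode()).digest()
    return struct.unpack_from("<Q", digest)[0]

# If the assumption holds, this prints the ids the CHECK-NEXT lines expect,
# e.g. 3498816979441845844 for _ZTSFicE.generalized.
for s in ("_ZTSFvE.generalized", "_ZTSFicE.generalized", "_ZTSFPvS_E.generalized"):
    print(s, callee_type_id(s))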
diff --git a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
new file mode 100644
index 0000000..fa14a98
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
@@ -0,0 +1,34 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file for tail calls.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+define i32 @main(i32 %argc) !type !3 {
+entry:
+ %andop = and i32 %argc, 1
+ %cmp = icmp eq i32 %andop, 0
+ %foo.bar = select i1 %cmp, ptr @foo, ptr @bar
+ %call.i = tail call i32 %foo.bar(i8 signext 97), !callee_type !1
+ ret i32 %call.i
+}
+
+declare !type !2 i32 @foo(i8 signext)
+
+declare !type !2 i32 @bar(i8 signext)
+
+;; Check that the numeric type ids (md5 hashes) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{i64 0, !"_ZTSFiiE.generalized"}
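The hex bytes matched by the CHECK-DAG above are the same 64-bit type id that other tests in this patch check in decimal (3498816979441845844 in the .quad and calleeTypeIds checks): llvm-readelf -x simply dumps the little-endian bytes stored in the object file. A minimal sketch of that byte-order relationship, in Python purely for illustration:

import struct

# The decimal id from the .quad/calleeTypeIds checks, stored as a little-endian
# 64-bit word, is exactly the byte sequence llvm-readelf prints as "5486bc59 814b8e30".
assert struct.pack("<Q", 3498816979441845844).hex() == "5486bc59814b8e30"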
diff --git a/llvm/test/CodeGen/X86/call-graph-section.ll b/llvm/test/CodeGen/X86/call-graph-section.ll
new file mode 100644
index 0000000..4a9840e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section.ll
@@ -0,0 +1,38 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+declare !type !0 void @foo()
+
+declare !type !1 i32 @bar(i8)
+
+declare !type !2 ptr @baz(ptr)
+
+define void @main() {
+entry:
+ %a = alloca i8, align 1
+ %fp_foo_val = load ptr, ptr null, align 8
+ call void (...) %fp_foo_val(), !callee_type !1
+ %fp_bar_val = load ptr, ptr null, align 8
+ %param = trunc i64 0 to i8
+ %call_fp_bar = call i32 %fp_bar_val(i8 signext %param), !callee_type !3
+ %fp_baz_val = load ptr, ptr null, align 8
+ %call_fp_baz = call ptr %fp_baz_val(ptr %a), !callee_type !4
+ ret void
+}
+
+;; Check that the numeric type ids (md5 hashes) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+; CHECK-DAG: 2444f731 f5eecb3e
+!0 = !{i64 0, !"_ZTSFvE.generalized"}
+!1 = !{!0}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{!2}
+; CHECK-DAG: 7ade6814 f897fd77
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
diff --git a/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..7881ea7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple x86_64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+  ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..8f6b7a6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..e97a6ac
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
index 17de405..0f28964 100644
--- a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir
+++ b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
@@ -1,6 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=early-tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
-# Check that only the computed goto is not be restrict by tail-dup-pred-size and tail-dup-succ-size.
+#
+# Check that the computed goto, like all other branches, is restricted by tail-dup-pred-size and tail-dup-succ-size.
+#
--- |
@computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
declare i64 @f0()
@@ -30,54 +32,54 @@ tracksRegLiveness: true
body: |
; CHECK-LABEL: name: computed_goto
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64_nosp = COPY [[COPY2]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY4]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY6]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr64_nosp = COPY [[COPY8]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY8]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr64_nosp = PHI [[COPY]], %bb.0, [[COPY4]], %bb.4, [[COPY3]], %bb.3, [[COPY2]], %bb.2, [[COPY1]], %bb.1
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[PHI]], @computed_goto.dispatch, $noreg
bb.0:
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
index ca1153a..bdada7d 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
@@ -1,12 +1,5 @@
; RUN: opt < %s -S -passes=hwasan -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
-
; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\10" }, align 16
; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 268435460 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -17,14 +10,21 @@
; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 318767088 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 301989920 }, section "hwasan_globals", !associated [[HUGE]]
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 2305843009213693952) to ptr)
; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 2449958197289549824) to ptr)
; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 2594073385365405696) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK: [[FOUR]] = !{ptr @four.hwasan}
; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
index f5ae1c0..4c28523 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
@@ -1,16 +1,11 @@
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29 %s
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30 %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=riscv64-unknown-elf -hwasan-globals=1 -hwasan-all-globals=1 | FileCheck --check-prefixes=CHECK,CHECK30,ALLGLOBALS %s
; CHECK29: @four = global
; CHECK: @specialcaselisted = global i16 2, no_sanitize_hwaddress
-; CHECK: @insection = global i16 2, section "custom"
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+; NOALLGLOBALS: @insection = global i16 2, section "custom"
; CHECK30: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\AC" }, align 16
; CHECK30: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 -1409286140 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -22,14 +17,23 @@
; CHECK30: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 -1358954512 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK30: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 -1375731680 }, section "hwasan_globals", !associated [[HUGE]]
+; ALLGLOBALS: @insection.hwasan = private global { i16, [14 x i8] } { i16 2, [14 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\AF" }, section "custom", align 16
+
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK30: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 -6052837899185946624) to ptr)
; CHECK30: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 -5980780305148018688) to ptr)
; CHECK30: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 -5908722711110090752) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK30: [[FOUR]] = !{ptr @four.hwasan}
; CHECK30: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK30: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
index bc0f586..f586e4a 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
@@ -173,6 +173,21 @@ v_mul_f16_e32 v5, v1, v255
v_mul_f16_e32 v5, v255, v2
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
v_sub_f16_dpp v255, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
index 04c55ee..1f40a32 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
@@ -217,6 +217,66 @@ v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -261,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16
+
+v_cvt_pk_bf8_f16 v1, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16
+
+v_cvt_pk_fp8_f16 v1, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1]
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
index ebfeb3f..03f642d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
@@ -217,6 +217,66 @@ v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -261,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v0.l, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v0.l, v2 clamp ; encoding: [0x00,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16; values that would
+// normally use an inline constant are instead encoded as 32-bit literals.
+
+v_cvt_pk_bf8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16; values that would
+// normally use an inline constant are instead encoded as 32-bit literals.
+
+v_cvt_pk_fp8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
index d9c7645..a926f7e 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
@@ -146,6 +146,54 @@ v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] quad_perm:[0,1,2
// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -201,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:40: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:61: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
index ccf50b2..f766e52 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
@@ -146,6 +146,54 @@ v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 quad_perm:[0,1,2,3]
// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -201,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
index 40d27c8..3b864b9 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
@@ -122,6 +122,38 @@ v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,
// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -137,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:58: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
index fb5593d..c726a0d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
@@ -122,6 +122,38 @@ v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 dpp8:[0,0,0,0,0,0,0,0]
// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -137,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
index 7e29d04..c5bd00c 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
@@ -86,3 +86,78 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid scale_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid byte_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
new file mode 100644
index 0000000..b7d93e1
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
@@ -0,0 +1,20 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --unique --sort --version 5
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefix=GFX12 --implicit-check-not=error: %s
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
index 9fd7edd..ce8cfcb 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
@@ -236,6 +236,66 @@
0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04
# GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
# GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
@@ -281,6 +341,482 @@
0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00]
-## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-# GFX1250-FAKE16: {{.*}}
-# GFX1250-REAL16: {{.*}}
+0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0x3800 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00]
+
+0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08
+# GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10
+# GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
index f8d9afe..5fa7bc8 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
@@ -132,6 +132,42 @@
0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff
# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff]
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
@@ -173,3 +209,216 @@
0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
index 44726a1..faeff45 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
@@ -110,6 +110,30 @@
0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
@@ -121,3 +145,162 @@
0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
diff --git a/llvm/test/MC/ELF/many-instructions.s b/llvm/test/MC/ELF/many-instructions.s
deleted file mode 100644
index cbdb2a7..0000000
--- a/llvm/test/MC/ELF/many-instructions.s
+++ /dev/null
@@ -1,10 +0,0 @@
-# REQUIRES: asserts
-# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o /dev/null -debug-only=mc-dump
-
-## Test that encodeInstruction may cause a new fragment to be created.
-# CHECK: 0 Data Size:16200
-# CHECK: 16200 Data Size:180
-
-.rept 16384/10
-movabsq $foo, %rax
-.endr
diff --git a/llvm/test/MC/X86/verify-callgraph-section.s b/llvm/test/MC/X86/verify-callgraph-section.s
new file mode 100644
index 0000000..ce07228
--- /dev/null
+++ b/llvm/test/MC/X86/verify-callgraph-section.s
@@ -0,0 +1,58 @@
+/// Test the callgraph section to make sure the indirect callsites
+/// (annotated by generated temporary labels .Ltmp*) are associated
+/// with the corresponding callee type identifiers.
+
+// RUN: llvm-mc -triple=x86_64 -filetype=obj -o - < %s | llvm-readelf -x .callgraph - | FileCheck %s
+
+ .text
+ .globl ball # -- Begin function ball
+ .p2align 4
+ .type ball,@function
+ball: # @ball
+.Lfunc_begin0:
+# %bb.0: # %entry
+ pushq %rbx
+ subq $32, %rsp
+ movl $0, 4(%rsp)
+ movq foo@GOTPCREL(%rip), %rcx
+ movq %rcx, 24(%rsp)
+ xorl %eax, %eax
+ callq *%rcx
+.Ltmp0:
+ movq bar@GOTPCREL(%rip), %rax
+ movq %rax, 16(%rsp)
+ movsbl 3(%rsp), %edi
+ callq *%rax
+.Ltmp1:
+ movq baz@GOTPCREL(%rip), %rax
+ movq %rax, 8(%rsp)
+ leaq 3(%rsp), %rbx
+ movq %rbx, %rdi
+ callq *%rax
+.Ltmp2:
+ callq foo@PLT
+ movsbl 3(%rsp), %edi
+ callq bar@PLT
+ movq %rbx, %rdi
+ callq baz@PLT
+ addq $32, %rsp
+ popq %rbx
+ retq
+ .section .callgraph,"o",@progbits,.text
+ .quad 0
+ .quad .Lfunc_begin0
+ .quad 1
+ .quad 3
+ /// MD5 hash of the callee type ID for foo.
+ // CHECK: 2444f731 f5eecb3e
+ .quad 0x3ecbeef531f74424
+ .quad .Ltmp0
+ /// MD5 hash of the callee type ID for bar.
+ // CHECK: 5486bc59 814b8e30
+ .quad 0x308e4b8159bc8654
+ .quad .Ltmp1
+ /// MD5 hash of the callee type ID for baz.
+ // CHECK: 7ade6814 f897fd77
+ .quad 0x77fd97f81468de7a
+ .quad .Ltmp2
+ .text
diff --git a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
index da756e7..9eb9bda 100644
--- a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
+++ b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
@@ -30,6 +30,20 @@
; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+; The output is identical when -indexed-codegen-data-read-function-map-names=false is used.
+; RUN: llvm-lto2 run -enable-global-merge-func=true \
+; RUN: -indexed-codegen-data-read-function-map-names=false \
+; RUN: -codegen-data-use-path=%tout.cgdata \
+; RUN: %t-foo.bc %t-goo.bc -o %tout-read \
+; RUN: -r %t-foo.bc,_f1,px \
+; RUN: -r %t-goo.bc,_f2,px \
+; RUN: -r %t-foo.bc,_g,l -r %t-foo.bc,_g1,l -r %t-foo.bc,_g2,l \
+; RUN: -r %t-goo.bc,_g,l -r %t-goo.bc,_g1,l -r %t-goo.bc,_g2,l
+; RUN: llvm-nm %tout-read.1 | FileCheck %s --check-prefix=READ1
+; RUN: llvm-nm %tout-read.2 | FileCheck %s --check-prefix=READ2
+; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
+; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+
; READ1: _f1.Tgm
; READ2: _f2.Tgm
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
index 4d57199..bb3001e 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
@@ -190,6 +190,39 @@ return: ; preds = %entry, %if.end
ret i32 %retval.0
}
+define i32 @ctz3_with_i8gep(i32 %x) {
+; CHECK-LABEL: @ctz3_with_i8gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[TMP2]], [[IF_END]] ], [ 32, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 81224991
+ %0 = lshr i32 %mul, 25
+ %1 = and i32 %0, 124
+ %arrayidx.idx = zext nneg i32 %1 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz3.table, i64 %arrayidx.idx
+ %2 = load i32, ptr %arrayidx, align 4
+ br label %return
+
+return: ; preds = %if.end, %entry
+ %retval.0 = phi i32 [ %2, %if.end ], [ 32, %entry ]
+ ret i32 %retval.0
+}
+
+
@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4
define i32 @ctz4(i64 %b) {
@@ -276,3 +309,192 @@ entry:
%0 = load i32, ptr %arrayidx, align 4
ret i32 %0
}
+
+;; This has a wrong table size but is otherwise fine.
+@ctz9.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+define i32 @ctz9(i32 %x) {
+; CHECK-LABEL: @ctz9(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [128 x i8], ptr @ctz9.table, i64 0, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+define i32 @ctz1_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz1_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz7.table, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2 (i16 table) with an i8 gep making the indices invalid.
+define i32 @ctz2_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], 72416175
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[MUL]], 26
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[SHR]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 26
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 %idxprom
+ %0 = load i16, ptr %arrayidx, align 1
+ %conv = sext i16 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2_with_i8_gep but with the gep index multiplied by 2.
+define i32 @ctz2_with_i8_gep_fixed(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep_fixed(
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false)
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 25
+ %shr2 = and i32 %shr, 126
+ %1 = zext nneg i32 %shr2 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz2.table, i64 %1
+ %2 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %2 to i32
+ ret i32 %conv
+}
+
+; This is a i16 input with the debruijn table stored in a single i128.
+@tablei128 = internal unnamed_addr constant i128 16018378897745984667142067713738932480, align 16
+define i32 @cttz_i16_via_i128(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i16 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as above but the table contents are slightly incorrect, so the transform should not apply.
+@tablei128b = internal unnamed_addr constant i128 16018378897745984667142068813250560256, align 16
+define i32 @cttz_i16_via_i128_incorrecttable(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128_incorrecttable(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i16 [[X]], [[SUB]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i16 [[AND]], 2479
+; CHECK-NEXT: [[TMP0:%.*]] = lshr i16 [[MUL]], 12
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i16 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr @tablei128b, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as ctz1 but the table and load are very large.
+@ctz7i128.table = internal unnamed_addr constant [32 x i128] [i128 0, i128 1, i128 28, i128 2, i128 29, i128 14, i128 24, i128 3, i128 30, i128 22, i128 20, i128 15, i128 25, i128 17, i128 4, i128 8, i128 31, i128 27, i128 13, i128 23, i128 21, i128 19, i128 16, i128 7, i128 26, i128 12, i128 18, i128 6, i128 11, i128 5, i128 10, i128 9], align 16
+define i128 @ctz1_i128(i32 %x) {
+; CHECK-LABEL: @ctz1_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i128
+; CHECK-NEXT: ret i128 [[TMP3]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [32 x i128], ptr @ctz7i128.table, i64 0, i64 %idxprom
+ %l = load i128, ptr %arrayidx, align 1
+ ret i128 %l
+}
+
+; This is roughly the same as ctz1 but using i128.
+@table.i128 = internal unnamed_addr constant [128 x i8] c"\00\01e\02tf<\03|ug^R=!\04}yvWoh_5ZSE>0\22\14\05~rzPwmX.pkiI`K6\1Ab[TBMF?'81*#\1C\15\0E\06\7Fds;{]Q xVn4YD/\13qOl-jHJ\19aAL&7)\1B\0Dc:\\\1FU3C\12N,G\18@%(\0C9\1E2\11+\17$\0B\1D\10\16\0A\0F\09\08\07", align 1
+define i32 @src(i128 noundef %x) {
+; CHECK-LABEL: @src(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP3:%.*]] = call i128 @llvm.cttz.i128(i128 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i128 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i128 0, i128 [[TMP3]]
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i128 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i128 0, %x
+ %and = and i128 %x, %sub
+ %mul = mul i128 %and, 2647824804797170443043024478319300753
+ %shr = lshr i128 %mul, 121
+ %idxprom = trunc i128 %shr to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @table.i128, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
index 714acd7..90836db8 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
@@ -66,7 +66,7 @@ entry:
;; This is a negative test with a wrong table size and constants.
-@ctz3.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+@ctz3.table = internal unnamed_addr constant [128 x i8] c"\01\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
define i32 @ctz5(i32 %x) {
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
index 48b92e9..640c910 100644
--- a/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
+++ b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
@@ -10,21 +10,21 @@ define void @pred_mip_12(ptr %dst, ptr %src, i32 %n, i64 %offset) {
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]], i64 [[OFFSET:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N]], i32 1)
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[SMAX]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[OFFSET]], [[TMP0]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: br label %[[OUTER_LOOP:.*]]
; CHECK: [[OUTER_LOOP_LOOPEXIT:.*]]:
-; CHECK-NEXT: [[PTR_IV_NEXT_LCSSA:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[INNER_LOOP:.*]] ]
; CHECK-NEXT: br label %[[OUTER_LOOP]]
; CHECK: [[OUTER_LOOP]]:
-; CHECK-NEXT: [[OUTER_PTR:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[PTR_IV_NEXT_LCSSA]], %[[OUTER_LOOP_LOOPEXIT]] ]
+; CHECK-NEXT: [[OUTER_PTR:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[SCEVGEP]], %[[OUTER_LOOP_LOOPEXIT]] ]
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
; CHECK-NEXT: br i1 [[C]], label %[[INNER_LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
; CHECK: [[INNER_LOOP_PREHEADER]]:
-; CHECK-NEXT: br label %[[INNER_LOOP]]
+; CHECK-NEXT: br label %[[INNER_LOOP:.*]]
; CHECK: [[INNER_LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[INNER_LOOP]] ], [ 0, %[[INNER_LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT]], %[[INNER_LOOP]] ], [ [[SRC]], %[[INNER_LOOP_PREHEADER]] ]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[OUTER_PTR]], align 1
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 [[OFFSET]]
; CHECK-NEXT: store i8 [[L]], ptr [[DST]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]]
diff --git a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
index d24f9a4..17921af 100644
--- a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
@@ -15,11 +15,9 @@ define void @_Z3fn1v() {
; CHECK-NEXT: [[J_SROA_0_0_COPYLOAD:%.*]] = load i8, ptr [[X5]], align 1
; CHECK-NEXT: br label [[DOTPREHEADER4_LR_PH:%.*]]
; CHECK: .preheader4.lr.ph:
-; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[X4]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT: [[TMP4:%.*]] = sext i8 [[J_SROA_0_0_COPYLOAD]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[X4]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP2]]
; CHECK-NEXT: br label [[DOTPREHEADER4:%.*]]
; CHECK: .preheader4:
; CHECK-NEXT: [[K_09:%.*]] = phi ptr [ undef, [[DOTPREHEADER4_LR_PH]] ], [ [[X25:%.*]], [[X22:%.*]] ]
diff --git a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
index b57a45f..8a608a1 100644
--- a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
@@ -7,10 +7,10 @@ target triple = "aarch64"
declare void @streaming_compatible_f() #0 "aarch64_pstate_sm_compatible"
-; Function @streaming_callee doesn't contain any operations that may use ZA
+; Function @non_streaming_callee doesn't contain any operations that may use ZA
; state and therefore can be legally inlined into a normal function.
-define void @streaming_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_callee
+define void @non_streaming_callee() #0 {
+; CHECK-LABEL: define void @non_streaming_callee
; CHECK-SAME: () #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: call void @streaming_compatible_f()
@@ -21,26 +21,26 @@ define void @streaming_callee() #0 "aarch64_pstate_sm_enabled" {
ret void
}
-; Inline call to @streaming_callee to remove a streaming mode change.
-define void @non_streaming_caller_inline() #0 {
-; CHECK-LABEL: define void @non_streaming_caller_inline
+; Inline call to @non_streaming_callee to remove a streaming mode change.
+define void @streaming_caller_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_inline
; CHECK-SAME: () #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: ret void
;
- call void @streaming_callee()
+ call void @non_streaming_callee()
ret void
}
-; Don't inline call to @streaming_callee when the inline-threshold is set to 1, because it does not eliminate a streaming-mode change.
-define void @streaming_caller_dont_inline() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_dont_inline
+; Don't inline call to @non_streaming_callee when the inline-threshold is set to 1, because it does not eliminate a streaming-mode change.
+define void @non_streaming_caller_dont_inline() #0 {
+; CHECK-LABEL: define void @non_streaming_caller_dont_inline
; CHECK-SAME: () #[[ATTR1]] {
-; CHECK-NEXT: call void @streaming_callee()
+; CHECK-NEXT: call void @non_streaming_callee()
; CHECK-NEXT: ret void
;
- call void @streaming_callee()
+ call void @non_streaming_callee()
ret void
}
diff --git a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
index 6cb1692..077a3aa 100644
--- a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
@@ -86,7 +86,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_normal_callee_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_normal_callee_inline
-; CHECK-SAME: () #[[ATTR6:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -103,7 +103,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -120,7 +120,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_streaming_compatible_callee_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -137,7 +137,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_locally_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -154,7 +154,7 @@ entry:
; [x] N -> SC + B
define i32 @normal_caller_streaming_compatible_locally_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_compatible_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_compatible_locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -171,7 +171,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR7:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -188,7 +188,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -205,7 +205,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -222,7 +222,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -239,7 +239,7 @@ entry:
; [x] S -> SC + B
define i32 @streaming_caller_streaming_compatible_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_compatible_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -256,7 +256,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR8:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -273,7 +273,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -290,7 +290,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -307,7 +307,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -324,7 +324,7 @@ entry:
; [x] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_compatible_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_compatible_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -341,7 +341,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -358,7 +358,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -375,7 +375,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -392,7 +392,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_locally_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -409,7 +409,7 @@ entry:
; [x] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_compatible_locally_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_compatible_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_compatible_locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -425,7 +425,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR10:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -442,7 +442,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -459,7 +459,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -476,7 +476,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -493,7 +493,7 @@ entry:
; [x] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_and_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_and_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -505,7 +505,7 @@ entry:
define void @normal_callee_with_inlineasm() #0 {
; CHECK-LABEL: define void @normal_callee_with_inlineasm
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: ret void
@@ -517,7 +517,7 @@ entry:
define void @streaming_caller_normal_callee_with_inlineasm_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define void @streaming_caller_normal_callee_with_inlineasm_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @normal_callee_with_inlineasm()
; CHECK-NEXT: ret void
@@ -529,7 +529,7 @@ entry:
define i64 @normal_callee_with_intrinsic_call() #0 {
; CHECK-LABEL: define i64 @normal_callee_with_intrinsic_call
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.aarch64.sve.cntb(i32 4)
; CHECK-NEXT: ret i64 [[RES]]
@@ -541,7 +541,7 @@ entry:
define i64 @streaming_caller_normal_callee_with_intrinsic_call_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i64 @streaming_caller_normal_callee_with_intrinsic_call_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @normal_callee_with_intrinsic_call()
; CHECK-NEXT: ret i64 [[RES]]
@@ -555,7 +555,7 @@ declare i64 @llvm.aarch64.sve.cntb(i32)
define i64 @normal_callee_call_sme_state() #0 {
; CHECK-LABEL: define i64 @normal_callee_call_sme_state
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call { i64, i64 } @__arm_sme_state()
; CHECK-NEXT: [[RES_0:%.*]] = extractvalue { i64, i64 } [[RES]], 0
@@ -571,7 +571,7 @@ declare {i64, i64} @__arm_sme_state()
define i64 @streaming_caller_normal_callee_call_sme_state_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i64 @streaming_caller_normal_callee_call_sme_state_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @normal_callee_call_sme_state()
; CHECK-NEXT: ret i64 [[RES]]
@@ -583,57 +583,57 @@ entry:
-declare void @streaming_body() "aarch64_pstate_sm_enabled"
+declare void @nonstreaming_body()
-define void @streaming_caller_single_streaming_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_callee
-; CHECK-SAME: () #[[ATTR7]] {
-; CHECK-NEXT: call void @streaming_body()
+define void @nonstreaming_caller_single_nonstreaming_callee() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_single_nonstreaming_callee
+; CHECK-SAME: () #[[ATTR1]] {
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_body()
+ call void @nonstreaming_body()
ret void
}
-define void @streaming_caller_multiple_streaming_callees() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_callees
-; CHECK-SAME: () #[[ATTR7]] {
-; CHECK-NEXT: call void @streaming_body()
-; CHECK-NEXT: call void @streaming_body()
+define void @nonstreaming_caller_multiple_nonstreaming_callees() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_multiple_nonstreaming_callees
+; CHECK-SAME: () #[[ATTR1]] {
+; CHECK-NEXT: call void @nonstreaming_body()
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_body()
- call void @streaming_body()
+ call void @nonstreaming_body()
+ call void @nonstreaming_body()
ret void
}
; Allow inlining, as inline it would not increase the number of streaming-mode changes.
-define void @streaming_caller_single_streaming_callee_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
-; CHECK-NEXT: call void @streaming_body()
+define void @streaming_caller_to_nonstreaming_callee_with_single_nonstreaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_single_nonstreaming_callee_inline
+; CHECK-SAME: () #[[ATTR2]] {
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_single_streaming_callee()
+ call void @nonstreaming_caller_single_nonstreaming_callee()
ret void
}
-; Prevent inlining, as inline it would lead to multiple streaming-mode changes.
-define void @streaming_caller_multiple_streaming_callees_dont_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_callees_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
-; CHECK-NEXT: call void @streaming_caller_multiple_streaming_callees()
+; Prevent inlining, as inlining it would lead to multiple streaming-mode changes.
+define void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline
+; CHECK-SAME: () #[[ATTR2]] {
+; CHECK-NEXT: call void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_multiple_streaming_callees()
+ call void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline()
ret void
}
declare void @streaming_compatible_body() "aarch64_pstate_sm_compatible"
-define void @streaming_caller_single_streaming_compatible_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_compatible_callee
-; CHECK-SAME: () #[[ATTR7]] {
+define void @nonstreaming_caller_single_streaming_compatible_callee() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_single_streaming_compatible_callee
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
@@ -641,9 +641,9 @@ define void @streaming_caller_single_streaming_compatible_callee() #0 "aarch64_
ret void
}
-define void @streaming_caller_multiple_streaming_compatible_callees() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_compatible_callees
-; CHECK-SAME: () #[[ATTR7]] {
+define void @nonstreaming_caller_multiple_streaming_compatible_callees() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_multiple_streaming_compatible_callees
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
@@ -654,25 +654,67 @@ define void @streaming_caller_multiple_streaming_compatible_callees() #0 "aarch
}
; Allow inlining, as inline would remove a streaming-mode change.
-define void @streaming_caller_single_streaming_compatible_callee_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
+define void @streaming_caller_to_nonstreaming_callee_with_single_streamingcompatible_callee_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_single_streamingcompatible_callee_inline
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_single_streaming_compatible_callee()
+ call void @nonstreaming_caller_single_streaming_compatible_callee()
ret void
}
-; Allow inlining, as inline would remove several stremaing-mode changes.
-define void @streaming_caller_multiple_streaming_compatible_callees_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_compatible_callees_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; Allow inlining, as inline would remove several streaming-mode changes.
+define void @streaming_caller_to_nonstreaming_callee_with_multiple_streamingcompatible_callees_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_multiple_streamingcompatible_callees_inline
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_multiple_streaming_compatible_callees()
+ call void @nonstreaming_caller_multiple_streaming_compatible_callees()
+ ret void
+}
+
+define void @simple_streaming_function(ptr %ptr) #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @simple_streaming_function
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[PTR]], align 16
+; CHECK-NEXT: ret void
+;
+ store <vscale x 4 x i32> zeroinitializer, ptr %ptr
+ ret void
+}
+
+; Don't allow inlining a streaming function into a non-streaming function.
+define void @non_streaming_caller_streaming_callee_dont_inline(ptr %ptr) #0 {
+; CHECK-LABEL: define void @non_streaming_caller_streaming_callee_dont_inline
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @simple_streaming_function(ptr [[PTR]])
+; CHECK-NEXT: ret void
+;
+ call void @simple_streaming_function(ptr %ptr)
+ ret void
+}
+
+define void @simple_locally_streaming_function(ptr %ptr) #0 "aarch64_pstate_sm_body" {
+; CHECK-LABEL: define void @simple_locally_streaming_function
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[PTR]], align 16
+; CHECK-NEXT: ret void
+;
+ store <vscale x 4 x i32> zeroinitializer, ptr %ptr
+ ret void
+}
+
+; Don't allow inlining a locally-streaming function into a non-streaming function.
+define void @non_streaming_caller_locally_streaming_callee_dont_inline(ptr %ptr) #0 {
+; CHECK-LABEL: define void @non_streaming_caller_locally_streaming_callee_dont_inline
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @simple_locally_streaming_function(ptr [[PTR]])
+; CHECK-NEXT: ret void
+;
+ call void @simple_locally_streaming_function(ptr %ptr)
ret void
}
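The streaming-mode tests above all probe one inlining heuristic: inline when doing so removes streaming-mode transitions, refuse when it would introduce them. As a minimal sketch (hypothetical, not part of the patch) of the case being avoided, inlining @simple_streaming_function into its non-streaming caller would leave a body written for streaming mode running under the caller's non-streaming PSTATE.SM:

  ; hypothetical result if the call above were inlined; this is what the inliner refuses to create
  define void @hypothetical_inlined_caller(ptr %ptr) {
    store <vscale x 4 x i32> zeroinitializer, ptr %ptr   ; body that expected streaming mode
    ret void
  }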
diff --git a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
index 934852d..02042b1 100644
--- a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
+++ b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
@@ -131,10 +131,10 @@ define i1 @test5(double %x, i1 %cond) {
; CHECK: if.then:
; CHECK-NEXT: ret i1 false
; CHECK: if.end:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[X]], i32 408)
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[Y:%.*]] = phi double [ -1.000000e+00, [[ENTRY:%.*]] ], [ [[X]], [[IF_END]] ]
-; CHECK-NEXT: [[RET:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[Y]], i32 408)
+; CHECK-NEXT: [[RET:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TMP0]], [[IF_END]] ]
; CHECK-NEXT: ret i1 [[RET]]
;
entry:
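The only change in this test is where the class check is evaluated: it is now done directly on %x in if.end, and the phi selects between that result and a constant for the entry edge. The constant comes from folding the original incoming value -1.0 through the same mask: assuming the usual test-mask encoding (fcNegNormal=0x8, fcNegSubnormal=0x10, fcPosSubnormal=0x80, fcPosNormal=0x100), 408 = 0x198 = 0x100 + 0x80 + 0x10 + 0x8, i.e. "finite and non-zero", so llvm.is.fpclass(double -1.0, i32 408) folds to true, which is the value the entry edge now carries.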
@@ -391,11 +391,9 @@ define float @test_signbit_check_fail(float %x, i1 %cond) {
; CHECK: if.else:
; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
; CHECK: if.then2:
-; CHECK-NEXT: [[FNEG2:%.*]] = fneg float [[X]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[VALUE:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[FNEG2]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
-; CHECK-NEXT: [[RET:%.*]] = call float @llvm.fabs.f32(float [[VALUE]])
+; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[X]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
; CHECK-NEXT: ret float [[RET]]
;
%i32 = bitcast float %x to i32
diff --git a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
index a92e0c2..e2f22b8 100644
--- a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
+++ b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
@@ -293,3 +293,183 @@ entry:
%p2 = getelementptr <vscale x 4 x i32>, ptr %p1, i64 %index
ret ptr %p2
}
+
+define ptr @test_all_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw3(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw3(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_nuw_disjoint(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nuw_disjoint(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = or disjoint i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_inbounds1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_inbounds1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_inbounds2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_inbounds2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 7
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds i8, ptr %base, i64 -1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw3(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw3(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_nusw_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nusw_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nusw nuw i8, ptr %base, i64 1
+ %index = add nsw nuw i64 %a, 2
+ %p2 = getelementptr nusw nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
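The folded constants in the expected output here are just byte arithmetic over the two GEPs: the inner i8 GEP contributes its constant byte offset, and the constant part of the index (the add or `or disjoint` of 2) contributes 2 i32 elements, so in most tests 1 + 2 * 4 = 9 bytes land on the leading i8 GEP and only the variable %a is left on the trailing i32 GEP; in test_all_inbounds_partial_nuw1 the inner offset is -1, hence -1 + 8 = 7. A minimal before/after in the same shape as test_all_nuw:

  %p1    = getelementptr nuw i8, ptr %base, i64 1     ; +1 byte
  %index = add nuw i64 %a, 2                          ; +2 i32 elements = +8 bytes
  %p2    = getelementptr nuw i32, ptr %p1, i64 %index
  ; becomes
  %t  = getelementptr nuw i8, ptr %base, i64 9        ; 1 + 8
  %p2 = getelementptr nuw i32, ptr %t, i64 %a

The flag variants then check that nuw/inbounds survive only when every contributing instruction carries them.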
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 7c1342a..55b5b5e 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -356,7 +356,7 @@ define i1 @test13_i16(i16 %X, ptr %P) {
define i1 @test13_i128(i128 %X, ptr %P) {
; CHECK-LABEL: @test13_i128(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[X:%.*]] to i64
; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[TMP1]], -1
; CHECK-NEXT: ret i1 [[C]]
;
@@ -412,7 +412,7 @@ define ptr @test_index_canon_inbounds(ptr %X, i32 %Idx) {
define ptr @test_index_canon_nusw_nuw(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon_nusw_nuw(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
; CHECK-NEXT: [[R:%.*]] = getelementptr nusw nuw i32, ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
@@ -568,7 +568,7 @@ define i32 @test20(ptr %P, i32 %A, i32 %B) {
define i32 @test20_as1(ptr addrspace(1) %P, i32 %A, i32 %B) {
; CHECK-LABEL: @test20_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i32 [[A:%.*]] to i16
; CHECK-NEXT: [[T6:%.*]] = icmp eq i16 [[TMP1]], 0
; CHECK-NEXT: [[T7:%.*]] = zext i1 [[T6]] to i32
; CHECK-NEXT: ret i32 [[T7]]
@@ -1978,4 +1978,94 @@ define ptr @gep_merge_nusw_const(ptr %p, i64 %idx, i64 %idx2) {
ret ptr %gep
}
+define ptr @gep_index_trunc_nothing(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_inbounds(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_inbounds(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr inbounds i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nothing(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
!0 = !{!"branch_weights", i32 2, i32 10}
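The new tests above pin down which flags the canonicalized index cast inherits when a GEP index is narrower or wider than the pointer's index width. From the checks: truncation becomes `trunc nsw` when the GEP is `nusw` (or `inbounds`, which implies `nusw`), `trunc nuw` when it is `nuw`, and both when both flags are present; widening stays a plain `sext` unless the GEP carries both `nusw` and `nuw`, in which case the index is known non-negative and `zext nneg` is used. A minimal sketch of the truncating case with both flags, mirroring gep_index_trunc_nusw_nuw:

  %gep = getelementptr nusw nuw i8, ptr %p, i128 %idx
  ; becomes
  %idx.trunc = trunc nuw nsw i128 %idx to i64
  %gep       = getelementptr nusw nuw i8, ptr %p, i64 %idx.trunc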
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index 1296dc6..f873551 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -39,9 +39,9 @@ define i1 @test59_as1(ptr addrspace(1) %foo) {
define i1 @test60(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i32
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -53,9 +53,9 @@ define i1 @test60(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -69,7 +69,7 @@ define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[I_TR]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index 5044850ec..1385dc3 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -286,8 +286,7 @@ define i1 @PR8882(i64 %i) {
define i1 @test24_as1(i64 %i) {
; CHECK-LABEL: @test24_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[I:%.*]], 65535
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 1000
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1:%.*]], 1000
; CHECK-NEXT: ret i1 [[CMP]]
;
%p1 = getelementptr inbounds i32, ptr addrspace(1) @X_as1, i64 %i
@@ -449,9 +448,9 @@ define i1 @test_gep_eq_no_inbounds(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
index 53c9736..07486ff 100644
--- a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
+++ b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
@@ -376,7 +376,7 @@ define i1 @test8(ptr %in, i64 %offset) {
; CHECK-NEXT: [[LD:%.*]] = load i64, ptr [[IN:%.*]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[CASTI8:%.*]] = inttoptr i32 [[TMP0]] to ptr
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[OFFSET:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[OFFSET:%.*]] to i32
; CHECK-NEXT: [[GEPI8:%.*]] = getelementptr inbounds i8, ptr [[CASTI8]], i32 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[PTRCAST:%.*]] = inttoptr i32 [[TMP2]] to ptr
diff --git a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
index c05cca9..ac44e6c 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
@@ -261,14 +261,11 @@ define i8 @knownbits_umax_select_test() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[CONTAIN:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[COND0:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[CONTAIN]] = call i8 @llvm.umax.i8(i8 [[INDVAR]], i8 1)
; CHECK-NEXT: [[COND1:%.*]] = call i1 @cond()
; CHECK-NEXT: br i1 [[COND1]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[BOOL:%.*]] = and i8 [[CONTAIN]], 1
-; CHECK-NEXT: ret i8 [[BOOL]]
+; CHECK-NEXT: ret i8 1
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index ccaf31f..df34e7d 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -293,8 +293,7 @@ define i1 @test10_struct_arr_i16(i16 %x) {
define i1 @test10_struct_arr_i64(i64 %x) {
; CHECK-LABEL: @test10_struct_arr_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 4294967295
-; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
+; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i64 0, i64 %x, i32 2
@@ -331,7 +330,7 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
define i1 @pr93017(i64 %idx) {
; CHECK-LABEL: @pr93017(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[IDX:%.*]] to i32
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr @table, i32 0, i32 [[TMP1]]
; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 4756b4f..3454835 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -2998,3 +2998,58 @@ join:
%cmp = icmp eq i32 %phi, 0
ret i1 %cmp
}
+
+declare void @may_exit()
+
+define i32 @intrinsic_over_phi_noundef(i1 %c, i1 %c2, i32 %a) {
+; CHECK-LABEL: @intrinsic_over_phi_noundef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.umax.i32(i32 [[A:%.*]], i32 1)
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP0]], [[IF]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: call void @may_exit()
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %join
+
+if:
+ br label %join
+
+join:
+ %phi = phi i32 [ %a, %if ], [ 0, %entry ]
+ call void @may_exit()
+ %umax = call noundef i32 @llvm.umax(i32 noundef %phi, i32 1)
+ ret i32 %umax
+}
+
+define i32 @multiple_intrinsics_with_multiple_phi_uses(i1 %c, i32 %arg) {
+; CHECK-LABEL: @multiple_intrinsics_with_multiple_phi_uses(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[IF_END:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ARG:%.*]], -8
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[ADD]], i32 [[ADD]], i32 29)
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP1]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %if.end
+
+if:
+ %add = add i32 %arg, -8
+ br label %if.end
+
+if.end:
+ %phi = phi i32 [ %add, %if ], [ 0, %entry ]
+ %fshl1 = call i32 @llvm.fshl.i32(i32 %phi, i32 %phi, i32 29)
+ %fshl2 = call i32 @llvm.fshl.i32(i32 %phi, i32 %phi, i32 29)
+ %add2 = add i32 %fshl1, %fshl2
+ ret i32 %add2
+}
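Both additions fold the intrinsic over the phi's incoming values. In the first test the umax is recomputed on %a in the %if block and the entry edge becomes the constant fold umax(0, 1) = 1; the noundef in the test name suggests that attribute is what gates the fold across the intervening @may_exit() call. In the second test the two identical fshl calls make %add2 = 2 * fshl(%phi, %phi, 29), so the %if edge turns into shl(fshl(%add, %add, 29), 1) and the entry edge folds to 2 * fshl(0, 0, 29) = 0, leaving just the phi as the result.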
diff --git a/llvm/test/Transforms/InstCombine/pr39908.ll b/llvm/test/Transforms/InstCombine/pr39908.ll
index 5d13a33..c36495d 100644
--- a/llvm/test/Transforms/InstCombine/pr39908.ll
+++ b/llvm/test/Transforms/InstCombine/pr39908.ll
@@ -19,7 +19,7 @@ define i1 @test(ptr %p, i32 %n) {
; Same test using 64-bit indices.
define i1 @test64(ptr %p, i64 %n) {
; CHECK-LABEL: @test64(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -32,7 +32,7 @@ define i1 @test64(ptr %p, i64 %n) {
; Here the offset overflows and is treated modulo 2^32. This is UB.
define i1 @test64_overflow(ptr %p, i64 %n) {
; CHECK-LABEL: @test64_overflow(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index c637481..86e586e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -8,12 +8,11 @@ define i8 @simple_recurrence_intrinsic_smax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX_ACC:%.*]] = phi i8 [ [[SMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX]] = call i8 @llvm.smax.i8(i8 [[SMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMAX:%.*]] = call i8 @llvm.smax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMAX]]
;
entry:
@@ -38,12 +37,11 @@ define i8 @simple_recurrence_intrinsic_smin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN_ACC:%.*]] = phi i8 [ [[SMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN]] = call i8 @llvm.smin.i8(i8 [[SMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMIN:%.*]] = call i8 @llvm.smin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMIN]]
;
entry:
@@ -68,12 +66,11 @@ define i8 @simple_recurrence_intrinsic_umax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMAX:%.*]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMAX]]
;
entry:
@@ -98,12 +95,11 @@ define i8 @simple_recurrence_intrinsic_umin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN_ACC:%.*]] = phi i8 [ [[UMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN]] = call i8 @llvm.umin.i8(i8 [[UMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMIN:%.*]] = call i8 @llvm.umin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMIN]]
;
entry:
@@ -128,12 +124,11 @@ define float @simple_recurrence_intrinsic_maxnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call float @llvm.maxnum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call float @llvm.maxnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -157,12 +152,11 @@ define float @simple_recurrence_intrinsic_minnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call float @llvm.minnum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call float @llvm.minnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
@@ -186,12 +180,11 @@ define float @simple_recurrence_intrinsic_maximum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -215,12 +208,11 @@ define float @simple_recurrence_intrinsic_minimum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
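Every recurrence in this file has the same shape: the accumulator starts at %a and each iteration recomputes op(acc, %b) with a loop-invariant %b, and all the ops involved (smax/smin/umax/umin, maxnum/minnum, maximum/minimum) satisfy op(op(a, b), b) = op(a, b). The loop is bottom-tested, so it runs at least once, and the exit value is therefore just op(%a, %b); that is why the accumulator phi disappears and a single call is materialized in the exit block, e.g. smax(smax(a, b), b) = smax(a, b) for the first test.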
diff --git a/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll b/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll
new file mode 100644
index 0000000..1e089e1
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+define i1 @extract_const_idx(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: define i1 @extract_const_idx(
+; CHECK-SAME: <vscale x 4 x i1> [[A:%.*]]) {
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 4 x i1> [[A]], i64 1
+; CHECK-NEXT: ret i1 [[ELT]]
+;
+ %subvec = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv4i1.i64(<vscale x 4 x i1> %a, i64 0)
+ %elt = extractelement <vscale x 2 x i1> %subvec, i32 1
+ ret i1 %elt
+}
+
+define float @extract_variable_idx(<vscale x 4 x float> %a, i32 %idx) {
+; CHECK-LABEL: define float @extract_variable_idx(
+; CHECK-SAME: <vscale x 4 x float> [[A:%.*]], i32 [[IDX:%.*]]) {
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 4 x float> [[A]], i32 [[IDX]]
+; CHECK-NEXT: ret float [[ELT]]
+;
+ %subvec = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32.i64(<vscale x 4 x float> %a, i64 0)
+ %elt = extractelement <vscale x 2 x float> %subvec, i32 %idx
+ ret float %elt
+}
+
+define float @negative_test(<vscale x 4 x float> %a) {
+; CHECK-LABEL: define float @negative_test(
+; CHECK-SAME: <vscale x 4 x float> [[A:%.*]]) {
+; CHECK-NEXT: [[SUBVEC:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[A]], i64 2)
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 2 x float> [[SUBVEC]], i64 1
+; CHECK-NEXT: ret float [[ELT]]
+;
+ %subvec = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32.i64(<vscale x 4 x float> %a, i64 2)
+ %elt = extractelement <vscale x 2 x float> %subvec, i32 1
+ ret float %elt
+}
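The fold exercised by this new file relies on the index mapping of llvm.vector.extract: element j of the subvector extracted at offset k is element k + j of the source, so for an offset-0 extract the element index carries over unchanged and the intrinsic can be bypassed, for constant and variable indices alike:

  %subvec = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv4i1.i64(<vscale x 4 x i1> %a, i64 0)
  %elt    = extractelement <vscale x 2 x i1> %subvec, i32 1
  ; becomes
  %elt    = extractelement <vscale x 4 x i1> %a, i64 1

The negative test keeps the intrinsic; the fold appears to be limited to offset-0 extracts, where no rebasing of the element index is needed.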
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 45e5686..0db5cbe 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -394,7 +394,7 @@ define i64 @negative_ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[A:%.*]] to i16
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i16 [[B_IDX]], -84
; CHECK-NEXT: ret i16 [[GEPDIFF]]
diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
index 75b8509..6eed7f8 100644
--- a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
+++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
@@ -416,6 +416,54 @@ define float @test_round_ftz_f_neg_1_5() {
ret float %res
}
+define double @test_round_d_2_5() {
+; CHECK-LABEL: define double @test_round_d_2_5() {
+; CHECK-NEXT: ret double 2.000000e+00
+;
+ %res = call double @llvm.nvvm.round.d(double 2.5)
+ ret double %res
+}
+
+define float @test_round_f_2_5() {
+; CHECK-LABEL: define float @test_round_f_2_5() {
+; CHECK-NEXT: ret float 2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.f(float 2.5)
+ ret float %res
+}
+
+define float @test_round_ftz_f_2_5() {
+; CHECK-LABEL: define float @test_round_ftz_f_2_5() {
+; CHECK-NEXT: ret float 2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.ftz.f(float 2.5)
+ ret float %res
+}
+
+define double @test_round_d_neg_2_5() {
+; CHECK-LABEL: define double @test_round_d_neg_2_5() {
+; CHECK-NEXT: ret double -2.000000e+00
+;
+ %res = call double @llvm.nvvm.round.d(double -2.5)
+ ret double %res
+}
+
+define float @test_round_f_neg_2_5() {
+; CHECK-LABEL: define float @test_round_f_neg_2_5() {
+; CHECK-NEXT: ret float -2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.f(float -2.5)
+ ret float %res
+}
+
+define float @test_round_ftz_f_neg_2_5() {
+; CHECK-LABEL: define float @test_round_ftz_f_neg_2_5() {
+; CHECK-NEXT: ret float -2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.ftz.f(float -2.5)
+ ret float %res
+}
+
define double @test_round_d_neg_subnorm() {
; CHECK-LABEL: define double @test_round_d_neg_subnorm() {
; CHECK-NEXT: ret double -0.000000e+00
diff --git a/llvm/test/Transforms/LICM/gep-reassociate.ll b/llvm/test/Transforms/LICM/gep-reassociate.ll
index 630a751..0090c76 100644
--- a/llvm/test/Transforms/LICM/gep-reassociate.ll
+++ b/llvm/test/Transforms/LICM/gep-reassociate.ll
@@ -39,11 +39,13 @@ exit:
ret void
}
-define void @both_inbounds_one_neg(ptr %ptr, i1 %c) {
+define void @both_inbounds_one_neg(ptr %ptr, i1 %c, i64 %neg) {
; CHECK-LABEL: define void @both_inbounds_one_neg
-; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]], i64 [[NEG:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 -1
+; CHECK-NEXT: [[IS_NEG:%.*]] = icmp slt i64 [[NEG]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[IS_NEG]])
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[NEG]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[VAL:%.*]] = call i32 @get.i32()
@@ -55,13 +57,15 @@ define void @both_inbounds_one_neg(ptr %ptr, i1 %c) {
; CHECK-NEXT: ret void
;
entry:
+ %is.neg = icmp slt i64 %neg, 0
+ call void @llvm.assume(i1 %is.neg)
br label %loop
loop:
%val = call i32 @get.i32()
%val.ext = zext i32 %val to i64
%ptr2 = getelementptr inbounds i8, ptr %ptr, i64 %val.ext
- %ptr3 = getelementptr i8, ptr %ptr2, i64 -1
+ %ptr3 = getelementptr i8, ptr %ptr2, i64 %neg
call void @use(ptr %ptr3)
br i1 %c, label %loop, label %exit
@@ -69,11 +73,13 @@ exit:
ret void
}
-define void @both_inbounds_pos(ptr %ptr, i1 %c) {
+define void @both_inbounds_pos(ptr %ptr, i1 %c, i64 %nonneg) {
; CHECK-LABEL: define void @both_inbounds_pos
-; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]], i64 [[NONNEG:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT: [[IS_NONNEG:%.*]] = icmp sge i64 [[NONNEG]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[IS_NONNEG]])
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[NONNEG]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[VAL:%.*]] = call i32 @get.i32()
@@ -85,13 +91,15 @@ define void @both_inbounds_pos(ptr %ptr, i1 %c) {
; CHECK-NEXT: ret void
;
entry:
+ %is.nonneg = icmp sge i64 %nonneg, 0
+ call void @llvm.assume(i1 %is.nonneg)
br label %loop
loop:
%val = call i32 @get.i32()
%val.ext = zext i32 %val to i64
%ptr2 = getelementptr inbounds i8, ptr %ptr, i64 %val.ext
- %ptr3 = getelementptr inbounds i8, ptr %ptr2, i64 1
+ %ptr3 = getelementptr inbounds i8, ptr %ptr2, i64 %nonneg
call void @use(ptr %ptr3)
br i1 %c, label %loop, label %exit
@@ -440,3 +448,32 @@ latch:
exit:
ret void
}
+
+; Do not reassociate constant offset GEP.
+define void @constant_offset(ptr %ptr, i1 %c) {
+; CHECK-LABEL: define void @constant_offset
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[VAL:%.*]] = call i64 @get.i64()
+; CHECK-NEXT: [[GEP_BASE:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[VAL]]
+; CHECK-NEXT: [[GEP_OFF:%.*]] = getelementptr i8, ptr [[GEP_BASE]], i64 1
+; CHECK-NEXT: call void @use(ptr [[GEP_OFF]])
+; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %val = call i64 @get.i64()
+ %gep.base = getelementptr i8, ptr %ptr, i64 %val
+ %gep.off = getelementptr i8, ptr %gep.base, i64 1
+ call void @use(ptr %gep.off)
+ br i1 %c, label %loop, label %exit
+
+exit:
+ ret void
+}
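The two rewritten tests above trade their literal offsets (-1 and 1) for a value whose sign is established via llvm.assume, presumably so they keep exercising the reassociation now that a GEP whose second offset is a plain constant is left alone (which is exactly what the new constant_offset test checks). The reassociation itself is what the INVARIANT_GEP checks show: the loop-invariant offset is applied first and hoisted to the preheader, and the loop-varying offset stays inside the loop; roughly, using the names from the first test:

  ; entry / preheader
  %invariant.gep = getelementptr i8, ptr %ptr, i64 %neg
  ; loop body
  %ptr3 = getelementptr i8, ptr %invariant.gep, i64 %val.ext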
diff --git a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
index 65aaf72..cd35401 100644
--- a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
@@ -33,11 +33,10 @@ define void @scev_expand_ptrtoint(i8 %x, ptr %start) {
; CHECK: [[LOOP_3_PREHEADER]]:
; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_2_HEADER]] ], [ [[INDVAR]], %[[LOOP_2_HEADER]] ]
; CHECK-NEXT: [[PTR_IV_2_LCSSA:%.*]] = phi ptr [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ], [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[START1]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 1, [[START1]]
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDVAR_LCSSA]], [[TMP4]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr [[SCEVGEP]])
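The updated checks reflect the trailing constant being folded into the initial subtraction of the expanded expression: the old sequence computed (ptrtoint(PTR_IV_1_LCSSA) - START1) and then added 1 in a separate instruction, while the new one starts from 1 - START1, and ptrtoint(PTR_IV_1_LCSSA) + (1 - START1) = (ptrtoint(PTR_IV_1_LCSSA) - START1) + 1, so TMP3 disappears and TMP4 adds CMP_EXT to TMP2 directly.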
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index aa954aeb..9003072 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -383,14 +383,14 @@ define void @vscale_squared_offset(ptr %alloc) #0 {
; COMMON-LABEL: vscale_squared_offset:
; COMMON: // %bb.0: // %entry
; COMMON-NEXT: rdvl x9, #1
+; COMMON-NEXT: rdvl x10, #4
; COMMON-NEXT: fmov z0.s, #4.00000000
-; COMMON-NEXT: mov x8, xzr
; COMMON-NEXT: lsr x9, x9, #4
; COMMON-NEXT: fmov z1.s, #8.00000000
-; COMMON-NEXT: cntw x10
+; COMMON-NEXT: mov x8, xzr
; COMMON-NEXT: ptrue p0.s, vl1
-; COMMON-NEXT: umull x9, w9, w9
-; COMMON-NEXT: lsl x9, x9, #6
+; COMMON-NEXT: umull x9, w9, w10
+; COMMON-NEXT: cntw x10
; COMMON-NEXT: cmp x8, x10
; COMMON-NEXT: b.ge .LBB6_2
; COMMON-NEXT: .LBB6_1: // %for.body
diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
index 8baded8..38d559f 100644
--- a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
+++ b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
@@ -485,12 +485,206 @@ exit: ; preds = %vector.body
!0 = !{!0, !1}
!1 = !{!"llvm.loop.isvectorized", i32 1}
+; On Cortex-A55 we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; APPLE-LABEL: define void @scalar_epilogue(
+; APPLE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; APPLE-NEXT: [[ENTRY:.*]]:
+; APPLE-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; APPLE-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; APPLE: [[VECTOR_PH]]:
+; APPLE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; APPLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; APPLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; APPLE-NEXT: br label %[[VECTOR_BODY:.*]]
+; APPLE: [[VECTOR_BODY]]:
+; APPLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; APPLE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; APPLE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; APPLE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; APPLE-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; APPLE-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; APPLE-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; APPLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; APPLE-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; APPLE: [[MIDDLE_BLOCK]]:
+; APPLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; APPLE: [[SCALAR_REMAINDER_PREHEADER]]:
+; APPLE-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; APPLE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; APPLE: [[SCALAR_REMAINDER]]:
+; APPLE-NEXT: [[I_06:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; APPLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; APPLE-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[ADD:%.*]] = add i8 [[TMP8]], [[SPLAT_SCALAR]]
+; APPLE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[INC]] = add nuw i64 [[I_06]], 1
+; APPLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; APPLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP5:![0-9]+]]
+; APPLE: [[EXIT_LOOPEXIT]]:
+; APPLE-NEXT: br label %[[EXIT]]
+; APPLE: [[EXIT]]:
+; APPLE-NEXT: ret void
+;
+; CORTEXA55-LABEL: define void @scalar_epilogue(
+; CORTEXA55-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CORTEXA55-NEXT: [[ENTRY:.*]]:
+; CORTEXA55-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; CORTEXA55-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CORTEXA55: [[VECTOR_PH]]:
+; CORTEXA55-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CORTEXA55-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CORTEXA55-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CORTEXA55-NEXT: br label %[[VECTOR_BODY:.*]]
+; CORTEXA55: [[VECTOR_BODY]]:
+; CORTEXA55-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CORTEXA55-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; CORTEXA55-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; CORTEXA55-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CORTEXA55-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CORTEXA55: [[MIDDLE_BLOCK]]:
+; CORTEXA55-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER]]:
+; CORTEXA55-NEXT: [[I_06_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CORTEXA55-NEXT: [[TMP8:%.*]] = sub i64 [[N]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP9:%.*]] = add i64 [[N]], -1
+; CORTEXA55-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP8]], 3
+; CORTEXA55-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; CORTEXA55-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL:%.*]] = add i8 [[TMP11]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL]], ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[INC_PROL:%.*]] = add nuw i64 [[I_06_PH]], 1
+; CORTEXA55-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_1]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL]]
+; CORTEXA55-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[TMP12]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[I_06_PH]], 2
+; CORTEXA55-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_2]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; CORTEXA55-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[TMP13]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[I_06_PH]], 3
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC_PROL]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ]
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[I_06_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; CORTEXA55-NEXT: [[TMP14:%.*]] = icmp ult i64 [[TMP10]], 3
+; CORTEXA55-NEXT: br i1 [[TMP14]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER]]:
+; CORTEXA55-NEXT: [[I_06:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_3:%.*]], %[[SCALAR_REMAINDER]] ]
+; CORTEXA55-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; CORTEXA55-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[ADD:%.*]] = add i8 [[TMP15]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[INC:%.*]] = add nuw i64 [[I_06]], 1
+; CORTEXA55-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; CORTEXA55-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[ADD_1:%.*]] = add i8 [[TMP16]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[INC_1:%.*]] = add nuw i64 [[I_06]], 2
+; CORTEXA55-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; CORTEXA55-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[ADD_2:%.*]] = add i8 [[TMP17]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[INC_2:%.*]] = add nuw i64 [[I_06]], 3
+; CORTEXA55-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; CORTEXA55-NEXT: [[TMP18:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[ADD_3:%.*]] = add i8 [[TMP18]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[INC_3]] = add nuw i64 [[I_06]], 4
+; CORTEXA55-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i64 [[INC_3]], [[N]]
+; CORTEXA55-NEXT: br i1 [[EXITCOND_NOT_3]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CORTEXA55: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: br label %[[EXIT_LOOPEXIT]]
+; CORTEXA55: [[EXIT_LOOPEXIT]]:
+; CORTEXA55-NEXT: br label %[[EXIT]]
+; CORTEXA55: [[EXIT]]:
+; CORTEXA55-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
;.
; APPLE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; APPLE: [[META1]] = !{!"llvm.loop.unroll.disable"}
; APPLE: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]]}
; APPLE: [[META3]] = !{!"llvm.loop.isvectorized", i32 1}
+; APPLE: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]]}
+; APPLE: [[LOOP5]] = distinct !{[[LOOP5]], [[META3]]}
;.
; CORTEXA55: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; CORTEXA55: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CORTEXA55: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CORTEXA55: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.
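For the CORTEXA55 run, the expected shape follows from the trip counts: the vector body already handles 32 elements per iteration and is left alone, while the scalar remainder, which always has fewer than 32 iterations left (either n when the vector path is skipped, or n - n.vec with n.vec = n & -32), is runtime-unrolled by 4, with a prologue peeling off the odd iterations so the by-4 loop sees a multiple of 4. The APPLE run keeps both loops un-unrolled. Roughly:

  remaining = n - i.06.ph          ; 0 .. 31 iterations left for the scalar loop
  XTRAITER  = remaining & 3        ; 0 .. 3 peeled prologue iterations
  ; the unrolled remainder loop then runs (remaining - XTRAITER) / 4 trips of 4 iterations each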
diff --git a/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
new file mode 100644
index 0000000..811d055
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
@@ -0,0 +1,603 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-unroll -mtriple riscv64 -mattr=+v,+f -S %s | FileCheck %s --check-prefixes=COMMON,CHECK
+; RUN: opt -p loop-unroll -mtriple=riscv64 -mcpu=sifive-s76 -S %s | FileCheck %s --check-prefixes=COMMON,SIFIVE
+
+define void @reverse(ptr %dst, ptr %src, i64 %len) {
+; CHECK-LABEL: define void @reverse(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[LEN]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @reverse(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[TMP2:%.*]] = add i64 [[LEN]], -1
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 7
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]]
+; SIFIVE: [[ENTRY_NEW]]:
+; SIFIVE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]]
+; SIFIVE-NEXT: br label %[[FOR_BODY:.*]]
+; SIFIVE: [[FOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; SIFIVE-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
+; SIFIVE-NEXT: [[TMP4:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT]]
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP4]]
+; SIFIVE-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT]]
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[ARRAYIDX2_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
+; SIFIVE-NEXT: [[TMP6:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP6]]
+; SIFIVE-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP7]], ptr [[ARRAYIDX2_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
+; SIFIVE-NEXT: [[TMP8:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP8]]
+; SIFIVE-NEXT: [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP9]], ptr [[ARRAYIDX2_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP10]]
+; SIFIVE-NEXT: [[TMP11:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[ARRAYIDX2_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5
+; SIFIVE-NEXT: [[TMP12:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP12]]
+; SIFIVE-NEXT: [[TMP13:%.*]] = load <4 x float>, ptr [[ARRAYIDX_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP13]], ptr [[ARRAYIDX2_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6
+; SIFIVE-NEXT: [[TMP14:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP14]]
+; SIFIVE-NEXT: [[TMP15:%.*]] = load <4 x float>, ptr [[ARRAYIDX_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP15]], ptr [[ARRAYIDX2_6]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7
+; SIFIVE-NEXT: [[TMP16:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_6]]
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP16]]
+; SIFIVE-NEXT: [[TMP17:%.*]] = load <4 x float>, ptr [[ARRAYIDX_7]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_6]]
+; SIFIVE-NEXT: store <4 x float> [[TMP17]], ptr [[ARRAYIDX2_7]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8
+; SIFIVE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8
+; SIFIVE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]]
+; SIFIVE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]]
+; SIFIVE: [[EXIT_UNR_LCSSA_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: br label %[[EXIT_UNR_LCSSA]]
+; SIFIVE: [[EXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[FOR_BODY_EPIL:.*]]
+; SIFIVE: [[FOR_BODY_EPIL]]:
+; SIFIVE-NEXT: [[TMP18:%.*]] = sub nsw i64 [[LEN]], [[IV_UNR]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP18]]
+; SIFIVE-NEXT: [[TMP19:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_UNR]]
+; SIFIVE-NEXT: store <4 x float> [[TMP19]], ptr [[ARRAYIDX2_EPIL]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[IV_UNR]], 1
+; SIFIVE-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[FOR_BODY_EPIL_1:.*]], label %[[EXIT_EPILOG_LCSSA:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_1]]:
+; SIFIVE-NEXT: [[TMP20:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP20]]
+; SIFIVE-NEXT: [[TMP21:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: store <4 x float> [[TMP21]], ptr [[ARRAYIDX2_EPIL_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[IV_UNR]], 2
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_1]], label %[[FOR_BODY_EPIL_2:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_2]]:
+; SIFIVE-NEXT: [[TMP22:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP22]]
+; SIFIVE-NEXT: [[TMP23:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP23]], ptr [[ARRAYIDX2_EPIL_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_2:%.*]] = add nuw nsw i64 [[IV_UNR]], 3
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_2]], label %[[FOR_BODY_EPIL_3:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_3]]:
+; SIFIVE-NEXT: [[TMP24:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP24]]
+; SIFIVE-NEXT: [[TMP25:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP25]], ptr [[ARRAYIDX2_EPIL_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_3:%.*]] = add nuw nsw i64 [[IV_UNR]], 4
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_3]], label %[[FOR_BODY_EPIL_4:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_4]]:
+; SIFIVE-NEXT: [[TMP26:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP26]]
+; SIFIVE-NEXT: [[TMP27:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP27]], ptr [[ARRAYIDX2_EPIL_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_4:%.*]] = add nuw nsw i64 [[IV_UNR]], 5
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_4]], label %[[FOR_BODY_EPIL_5:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_5]]:
+; SIFIVE-NEXT: [[TMP28:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP28]]
+; SIFIVE-NEXT: [[TMP29:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP29]], ptr [[ARRAYIDX2_EPIL_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_5:%.*]] = add nuw nsw i64 [[IV_UNR]], 6
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_5]], label %[[FOR_BODY_EPIL_6:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_6]]:
+; SIFIVE-NEXT: [[TMP30:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP30]]
+; SIFIVE-NEXT: [[TMP31:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP31]], ptr [[ARRAYIDX2_EPIL_6]], align 16
+; SIFIVE-NEXT: br label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[EXIT_EPILOG_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %1 = sub nsw i64 %len, %iv
+ %arrayidx = getelementptr inbounds <4 x float>, ptr %src, i64 %1
+ %2 = load <4 x float>, ptr %arrayidx, align 16
+ %arrayidx2 = getelementptr inbounds nuw <4 x float>, ptr %dst, i64 %iv
+ store <4 x float> %2, ptr %arrayidx2, align 16
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %len
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:                                            ; preds = %for.body
+ ret void
+}
+
+
+define void @saxpy_tripcount8_full_unroll(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount8_full_unroll(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMMON-NEXT: [[ENTRY:.*:]]
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[SRC]], align 4
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP0]], ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; COMMON-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; COMMON-NEXT: store <4 x float> [[TMP3]], ptr [[TMP2]], align 4
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 8
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
+define void @saxpy_tripcount1K_av0(ptr %dst, ptr %src, float %a) {
+; CHECK-LABEL: define void @saxpy_tripcount1K_av0(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @saxpy_tripcount1K_av0(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; SIFIVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; SIFIVE-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT1:%.*]] = add nuw nsw i64 [[INDEX]], 4
+; SIFIVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP12]], align 4
+; SIFIVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_1:%.*]] = add nuw nsw i64 [[INDEX]], 8
+; SIFIVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
+; SIFIVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_2:%.*]] = load <4 x float>, ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_2]], <4 x float> [[WIDE_LOAD12_2]])
+; SIFIVE-NEXT: store <4 x float> [[TMP8]], ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_2:%.*]] = add nuw nsw i64 [[INDEX]], 12
+; SIFIVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x float>, ptr [[TMP9]], align 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_3:%.*]] = load <4 x float>, ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_3]], <4 x float> [[WIDE_LOAD12_3]])
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT]] = add nuw nsw i64 [[INDEX]], 16
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
+define void @saxpy_tripcount1K_av1(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount1K_av1(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[ENTRY:.*]]:
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMMON-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; COMMON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; COMMON-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; COMMON: [[EXIT]]:
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body, !llvm.loop !0
+
+exit: ; preds = %vector.body
+ ret void
+}
+!0 = !{!0, !1}
+!1 = !{!"llvm.loop.isvectorized", i32 1}
+
+; On SiFive we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; CHECK-LABEL: define void @scalar_epilogue(
+; CHECK-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CHECK: [[SCALAR_REMAINDER_PREHEADER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CHECK: [[SCALAR_REMAINDER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; CHECK-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[INC]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @scalar_epilogue(
+; SIFIVE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; SIFIVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; SIFIVE: [[VECTOR_PH]]:
+; SIFIVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; SIFIVE-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; SIFIVE-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; SIFIVE: [[MIDDLE_BLOCK]]:
+; SIFIVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = add i64 [[N]], -1
+; SIFIVE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL]]:
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[INC:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; SIFIVE-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_1]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_1:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[SCALAR_LOAD_PROL_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 2
+; SIFIVE-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_2]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_2:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[SCALAR_LOAD_PROL_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 3
+; SIFIVE-NEXT: [[PROL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_2]], label %[[SCALAR_REMAINDER_PROL_3:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_3]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_3:%.*]] = load i8, ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_3:%.*]] = add i8 [[SCALAR_LOAD_PROL_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_3]], ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[INC_PROL_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 4
+; SIFIVE-NEXT: [[PROL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_3]], label %[[SCALAR_REMAINDER_PROL_4:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_4]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_4:%.*]] = load i8, ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_4:%.*]] = add i8 [[SCALAR_LOAD_PROL_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_4]], ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[INC_PROL_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 5
+; SIFIVE-NEXT: [[PROL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_4]], label %[[SCALAR_REMAINDER_PROL_5:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_5]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_5:%.*]] = load i8, ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_5:%.*]] = add i8 [[SCALAR_LOAD_PROL_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_5]], ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[INC_PROL_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 6
+; SIFIVE-NEXT: [[PROL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_5]], label %[[SCALAR_REMAINDER_PROL_6:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_6]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_6:%.*]] = load i8, ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_6:%.*]] = add i8 [[SCALAR_LOAD_PROL_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_6]], ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[INC_PROL_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 7
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ], [ [[INC_PROL_3]], %[[SCALAR_REMAINDER_PROL_3]] ], [ [[INC_PROL_4]], %[[SCALAR_REMAINDER_PROL_4]] ], [ [[INC_PROL_5]], %[[SCALAR_REMAINDER_PROL_5]] ], [ [[INC_PROL_6]], %[[SCALAR_REMAINDER_PROL_6]] ]
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[IV_SCALAR_LOOP]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; SIFIVE: [[SCALAR_REMAINDER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP1:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_7:%.*]], %[[SCALAR_REMAINDER]] ]
+; SIFIVE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[ADD1:%.*]] = add i8 [[SCALAR_LOAD1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD1]], ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[INC1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 1
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_1:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[ADD_1:%.*]] = add i8 [[SCALAR_LOAD_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[INC_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 2
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_2:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[ADD_2:%.*]] = add i8 [[SCALAR_LOAD_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[INC_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 3
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_3:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[ADD_3:%.*]] = add i8 [[SCALAR_LOAD_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[INC_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 4
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_4:%.*]] = load i8, ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[ADD_4:%.*]] = add i8 [[SCALAR_LOAD_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_4]], ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[INC_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 5
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_5:%.*]] = load i8, ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[ADD_5:%.*]] = add i8 [[SCALAR_LOAD_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_5]], ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[INC_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 6
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_6:%.*]] = load i8, ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[ADD_6:%.*]] = add i8 [[SCALAR_LOAD_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_6]], ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[INC_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 7
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_6]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_7:%.*]] = load i8, ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[ADD_7:%.*]] = add i8 [[SCALAR_LOAD_7]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_7]], ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[INC_7]] = add nuw i64 [[IV_SCALAR_LOOP1]], 8
+; SIFIVE-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INC_7]], [[N]]
+; SIFIVE-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; SIFIVE: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT_LOOPEXIT]]
+; SIFIVE: [[EXIT_LOOPEXIT]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
+; SIFIVE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; SIFIVE: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; SIFIVE: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; SIFIVE: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
index 011b823..a7ec749 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
@@ -27,7 +27,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> poison)
; CHECK-NEXT: [[TMP12:%.*]] = select fast <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP12]])
-; CHECK-NEXT: [[TMP14]] = fadd fast float [[TMP13]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP14]] = fadd fast float [[VEC_PHI]], [[TMP13]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
index 3d81541..e555785 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
@@ -33,9 +33,9 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP17]] = and i64 [[TMP16]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP17]] = and i64 [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD3]])
-; CHECK-NEXT: [[TMP19]] = and i64 [[TMP18]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP19]] = and i64 [[VEC_PHI2]], [[TMP18]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -59,7 +59,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX7]]
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x i64>, ptr [[TMP24]], align 8
; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP27]] = and i64 [[TMP26]], [[VEC_PHI8]]
+; CHECK-NEXT: [[TMP27]] = and i64 [[VEC_PHI8]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], 2
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC5]]
; CHECK-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index fc86e3a..f4982e6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -88,7 +88,7 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP13]])
-; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[VEC_PHI]], [[TMP14]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP17]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
@@ -349,7 +349,7 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP17]])
-; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[TMP18]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[VEC_PHI]], [[TMP18]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP21]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
index bc02595..9f2c70e 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
@@ -17,7 +17,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -66,11 +66,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[VEC_IND]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD1]])
-; CHECK-NEXT: [[TMP8]] = add i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP8]] = add i32 [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index f1bee3b..83cb325 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -29,7 +29,7 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP6]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP7]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -109,7 +109,7 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP7]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -184,7 +184,7 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
index e27b028..27e87bc 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
@@ -23,10 +23,10 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[WIDE_LOAD2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,10 +96,10 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP14:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[TMP14]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index ddd334d..658b9a4 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -62,7 +62,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -127,7 +127,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -192,7 +192,7 @@ define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -254,7 +254,7 @@ define i32 @add_i32_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -300,7 +300,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP1]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -347,7 +347,7 @@ define i32 @add_i8_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP1]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -392,7 +392,7 @@ define signext i16 @add_i16_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i16 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i16 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
@@ -438,7 +438,7 @@ define signext i16 @add_i8_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP1]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i16 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i16 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -483,7 +483,7 @@ define zeroext i8 @add_i8_i8(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i8 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i8 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
@@ -577,7 +577,7 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -652,7 +652,7 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -731,7 +731,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -807,7 +807,7 @@ define i32 @mla_i32_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -860,7 +860,7 @@ define i32 @mla_i16_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
@@ -915,7 +915,7 @@ define i32 @mla_i8_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -968,7 +968,7 @@ define signext i16 @mla_i16_i16(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i16> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> [[TMP2]], <8 x i16> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i16 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i16 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
@@ -1021,7 +1021,7 @@ define signext i16 @mla_i8_i16(ptr nocapture readonly %x, ptr nocapture readonly
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw <16 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP4]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i16 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i16 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -1074,7 +1074,7 @@ define zeroext i8 @mla_i8_i8(ptr nocapture readonly %x, ptr nocapture readonly %
; CHECK-NEXT: [[TMP2:%.*]] = mul <16 x i8> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i8 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i8 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
@@ -1127,7 +1127,7 @@ define i32 @red_mla_ext_s8_s16_s32(ptr noalias nocapture readonly %A, ptr noalia
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
@@ -1190,7 +1190,7 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
@@ -1275,7 +1275,7 @@ define i32 @red_mla_u8_s8_u32(ptr noalias nocapture readonly %A, ptr noalias noc
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
@@ -1339,9 +1339,9 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 {
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC1]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
@@ -1412,7 +1412,7 @@ define i32 @mla_i8_i32_multiuse(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP2]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1460,7 +1460,7 @@ define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
@@ -1528,10 +1528,10 @@ define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 no
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP4]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP10]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
@@ -1678,11 +1678,11 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[WIDE_LOAD]], splat (i32 8)
; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP5]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]])
-; CHECK-NEXT: [[TMP8]] = add i64 [[TMP7]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP8]] = add i64 [[VEC_PHI1]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP37:![0-9]+]]
@@ -1770,12 +1770,12 @@ define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = sext <8 x i16> [[STRIDED_VEC]] to <8 x i64>
; CHECK-NEXT: [[TMP8:%.*]] = mul nsw <8 x i64> [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP8]])
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = sext <8 x i16> [[STRIDED_VEC4]] to <8 x i64>
; CHECK-NEXT: [[TMP12:%.*]] = sext <8 x i16> [[STRIDED_VEC1]] to <8 x i64>
; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <8 x i64> [[TMP11]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP13]])
-; CHECK-NEXT: [[TMP16]] = add i64 [[TMP15]], [[TMP10]]
+; CHECK-NEXT: [[TMP16]] = add i64 [[TMP10]], [[TMP15]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index 21266e5..162440a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @fadd(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -152,54 +134,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @vfwmaccbf16.vv(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[VECTOR_BODY:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_BODY]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <4 x bfloat>, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = load <4 x bfloat>, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = load <4 x float>, ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP4:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP5:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD3]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x float> [[WIDE_MASKED_LOAD4]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store <4 x float> [[TMP6]], ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[I]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[MIDDLE_BLOCK]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; NO-ZVFBFMIN-PREDICATED: [[SCALAR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP1:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP1:%.*]] = getelementptr float, ptr [[C]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store float [[FMULADD]], ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I1]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -274,21 +208,3 @@ loop:
exit:
ret void
}
-;.
-; NO-ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; NO-ZVFBFMIN-PREDICATED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN-PREDICATED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN-PREDICATED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN-PREDICATED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; ZVFBFMIN: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; ZVFBFMIN: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index f654238..5f13089 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -25,7 +25,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -35,6 +35,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VEC_IND]], ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index 53e43e1..effaf57 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFHMIN: [[EXIT]]:
; NO-ZVFHMIN-NEXT: ret void
;
-; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
-; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: ret void
-;
; ZVFHMIN-LABEL: define void @fadd(
; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
@@ -86,6 +68,23 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
+; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
+; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
+; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
+; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
+; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
+; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: ret void
entry:
br label %loop
loop:
@@ -102,9 +101,3 @@ loop:
exit:
ret void
}
-;.
-; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFHMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFHMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 8a2ff1b..6e2434a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -85,7 +85,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP7]], align 2
; INLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i32>
; INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP9]])
-; INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
+; INLOOP-NEXT: [[TMP11]] = add i32 [[VEC_PHI]], [[TMP10]]
; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
; INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -132,7 +132,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
@@ -140,6 +140,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[VP_OP:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP9]]
; IF-EVL-OUTLOOP-NEXT: [[TMP10]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP5]])
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP5]]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -185,7 +186,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP5]], i32 8, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
@@ -193,6 +194,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vp.reduce.add.nxv8i32(i32 0, <vscale x 8 x i32> [[TMP14]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP5]], [[TMP6]]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -350,7 +352,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -359,6 +361,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -398,7 +401,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -406,6 +409,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP13]], i32 [[VEC_PHI]])
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index 02eee7a..6f20376 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -117,7 +117,7 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL: vector.body:
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -139,6 +139,7 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -325,7 +326,7 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL: vector.body:
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -363,6 +364,7 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP18]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP27:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll
new file mode 100644
index 0000000..c5396f2
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt -mtriple=riscv64 -mattr=+v -passes=loop-vectorize \
+; RUN: -scalable-vectorization=off -enable-masked-interleaved-mem-accesses \
+; RUN: -force-vector-interleave=1 -riscv-v-vector-bits-min=1024 -S < %s | FileCheck %s
+
+define void @store_factor_2_with_tail_gap(i64 %n, ptr %a) {
+; CHECK-LABEL: define void @store_factor_2_with_tail_gap(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i64> [[VEC_IND]], <16 x i64> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <32 x i64> [[TMP2]], <32 x i64> poison, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT: call void @llvm.masked.store.v32i64.p0(<32 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], i32 8, <32 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: store i64 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %0 = shl nsw i64 %iv, 1
+ %arrayidx = getelementptr inbounds i64, ptr %a, i64 %0
+ store i64 %iv, ptr %arrayidx, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index b82b7f3..01df436 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -32,7 +32,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP11]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -48,6 +48,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TMP17:%.*]] = trunc <vscale x 2 x i32> [[TMP16]] to <vscale x 2 x i8>
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> [[TMP17]], <vscale x 2 x ptr> align 1 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP11]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT8]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
new file mode 100644
index 0000000..554ce7b
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -0,0 +1,1481 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt < %s -p loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s
+
+; Reduction can be vectorized
+
+; ADD
+
+define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @add(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %add
+}
+
+; OR
+
+define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @or(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[OR_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %or = or i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %or
+}
+
+; AND
+
+define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @and(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[AND_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %and = and i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %and
+}
+
+; XOR
+
+define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @xor(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %xor = xor i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %xor
+}
+
+; SMIN
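+; The smin and umax reductions below are written as icmp + select idioms in
+; the scalar loop; the checks show they are recognised as min/max reductions
+; and reduced via llvm.vector.reduce.smin / llvm.vector.reduce.umax in the
+; middle block.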
+
+define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @smin(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %cmp.i = icmp slt i32 %0, %sum.010
+ %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %.sroa.speculated
+}
+
+; UMAX
+
+define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @umax(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %cmp.i = icmp ugt i32 %0, %sum.010
+ %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %.sroa.speculated
+}
+
+; FADD (FAST)
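+; The fast fadd reductions below are unordered reductions lowered to
+; llvm.vector.reduce.fadd. Note how the target features affect the chosen VF:
+; with +zvfh the half reduction uses scalable <vscale x 8 x half> vectors,
+; while +zvfhmin and +zvfbfmin fall back to fixed-width <16 x half> /
+; <16 x bfloat> vectors.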
+
+define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
+; CHECK-LABEL: define float @fadd_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add = fadd fast float %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %add
+}
+
+define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfh" {
+; CHECK-LABEL: define half @fadd_fast_half_zvfh(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %add = fadd fast half %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %add
+}
+
+define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfhmin" {
+; CHECK-LABEL: define half @fadd_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = fadd fast <16 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %add = fadd fast half %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %add
+}
+
+define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfbfmin" {
+; CHECK-LABEL: define bfloat @fadd_fast_bfloat(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = fadd fast <16 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %add = fadd fast bfloat %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %add
+}
+
+; FMIN (FAST)
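+; The fmin/fmax reductions in this and the following FMAX section are written
+; as fcmp + select idioms. They rely on the #0/#1/#2 attribute groups (defined
+; later in the file, presumably carrying the no-NaNs / no-signed-zeros
+; fast-math attributes) to be treated as reassociable min/max reductions.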
+
+define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: define float @fmin_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt float %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %.sroa.speculated
+}
+
+define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
+; CHECK-LABEL: define half @fmin_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt half %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %.sroa.speculated
+}
+
+define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
+; CHECK-LABEL: define bfloat @fmin_fast_bfloat_zvfbfmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt bfloat %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %.sroa.speculated
+}
+
+; FMAX (FAST)
+
+define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: define float @fmax_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt float %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %.sroa.speculated
+}
+
+define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
+; CHECK-LABEL: define half @fmax_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt half %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %.sroa.speculated
+}
+
+define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
+; CHECK-LABEL: define bfloat @fmax_fast_bfloat_zvfbfmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt bfloat %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %.sroa.speculated
+}
+
+; Reduction cannot be vectorized with scalable vectors; the MUL reductions
+; below fall back to fixed-width vectors.
+
+; MUL
+
+define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @mul(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ <i32 2, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MUL:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %mul = mul nsw i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %mul
+}
+
+; Note: This test was added to ensure we always check the legality of reductions before checking for memory dependencies
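+; The store to a[i + 32] inside the loop creates a loop-carried memory
+; dependence at a distance of 32 iterations; the checks below show the mul
+; reduction is still recognised and vectorized with fixed-width vectors.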
+define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @memory_dependence(
+; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ <i32 2, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: store <8 x i32> [[TMP2]], ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ [[MUL:%.*]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP8]]
+; CHECK-NEXT: [[ADD2:%.*]] = add nuw nsw i64 [[I]], 32
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD2]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %add2 = add nuw nsw i64 %i, 32
+ %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
+ store i32 %add, ptr %arrayidx3, align 4
+ %mul = mul nsw i32 %1, %sum
+ %inc = add nuw nsw i64 %i, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %mul
+}
+
+define float @fmuladd(ptr %a, ptr %b, i64 %n) {
+; CHECK-LABEL: define float @fmuladd(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
+ %1 = load float, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %muladd
+}
+
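+; With +zvfh the f16 fmuladd reduction is likewise vectorized scalably, using
+; <vscale x 8 x half> vectors.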
+define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" {
+; CHECK-LABEL: define half @fmuladd_f16_zvfh(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
+ %1 = load half, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %muladd
+}
+
+
+; Zvfhmin and Zvfbfmin only provide conversions, not native f16/bf16 arithmetic,
+; so we can't scalably vectorize reductions of f16 with zvfhmin or bf16 with
+; zvfbfmin. Make sure we use fixed-length vectors instead.
+
+define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvfhmin" {
+; CHECK-LABEL: define half @fmuladd_f16_zvfhmin(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ <half 0xH0000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ splat (half 0xH8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x half>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x half>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP4]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]], <16 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP8:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = load half, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
+ %1 = load half, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %muladd
+}
+
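+; bf16 fmuladd with +zvfbfmin: reduced with fixed-length <16 x bfloat> vectors,
+; matching the zvfhmin case above.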
+define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin" {
+; CHECK-LABEL: define bfloat @fmuladd_bf16(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ <bfloat 0xR0000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000>, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ splat (bfloat 0xR8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP4]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]], <16 x bfloat> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP8:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = load bfloat, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds bfloat, ptr %b, i64 %iv
+ %1 = load bfloat, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat %0, bfloat %1, bfloat %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %muladd
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
+attributes #1 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfhmin,+zvfhmin"}
+attributes #2 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfbfmin,+zvfbfmin"}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll
deleted file mode 100644
index 695a0c3..0000000
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll
+++ /dev/null
@@ -1,729 +0,0 @@
-; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on \
-; RUN: -riscv-v-vector-bits-max=128 \
-; RUN: -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize \
-; RUN: -pass-remarks-missed=loop-vectorize -mtriple riscv64-linux-gnu \
-; RUN: -force-target-max-vector-interleave=2 -mattr=+v,+f -S 2>%t \
-; RUN: | FileCheck %s -check-prefix=CHECK
-; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK
-
-; Reduction can be vectorized
-
-; ADD
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @add
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ADD1:.*]] = add <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = add <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[ADD:.*]] = add <vscale x 8 x i32> %[[ADD2]], %[[ADD1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %[[ADD]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %add = add nsw i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %add
-}
-
-; OR
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @or
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[OR1:.*]] = or <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[OR2:.*]] = or <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[OR:.*]] = or <vscale x 8 x i32> %[[OR2]], %[[OR1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> %[[OR]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %or = or i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %or
-}
-
-; AND
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @and
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[AND1:.*]] = and <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[AND2:.*]] = and <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[ABD:.*]] = and <vscale x 8 x i32> %[[ADD2]], %[[AND1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> %[[ADD]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %and = and i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %and
-}
-
-; XOR
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @xor
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[XOR1:.*]] = xor <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[XOR2:.*]] = xor <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[XOR:.*]] = xor <vscale x 8 x i32> %[[XOR2]], %[[XOR1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> %[[XOR]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %xor = xor i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %xor
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; SMIN
-
-define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @smin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ICMP1:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ICMP2:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
-; CHECK-NEXT: call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %cmp.i = icmp slt i32 %0, %sum.010
- %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; UMAX
-
-define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @umax
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ICMP1:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ICMP2:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
-; CHECK-NEXT: call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %cmp.i = icmp ugt i32 %0, %sum.010
- %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; FADD (FAST)
-
-define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[ADD1:.*]] = fadd fast <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = fadd fast <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[ADD:.*]] = fadd fast <vscale x 8 x float> %[[ADD2]], %[[ADD1]]
-; CHECK-NEXT: call fast float @llvm.vector.reduce.fadd.nxv8f32(float 0.000000e+00, <vscale x 8 x float> %[[ADD]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %add = fadd fast float %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %add
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfh" {
-; CHECK-LABEL: @fadd_fast_half_zvfh
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FADD1:.*]] = fadd fast <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <vscale x 8 x half> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %add = fadd fast half %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %add
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfhmin" {
-; CHECK-LABEL: @fadd_fast_half_zvfhmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <16 x half>
-; CHECK: %[[LOAD2:.*]] = load <16 x half>
-; CHECK: %[[FADD1:.*]] = fadd fast <16 x half> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <16 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <16 x half> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %add = fadd fast half %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %add
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfbfmin" {
-; CHECK-LABEL: @fadd_fast_bfloat
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <16 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <16 x bfloat>
-; CHECK: %[[FADD1:.*]] = fadd fast <16 x bfloat> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <16 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <16 x bfloat> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %add = fadd fast bfloat %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %add
-}
-
-; FMIN (FAST)
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
-; CHECK-LABEL: @fmin_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
-; CHECK-NEXT: call float @llvm.vector.reduce.fmin.nxv8f32(<vscale x 8 x float> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %cmp.i = fcmp olt float %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
-; CHECK-LABEL: @fmin_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x half> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x half> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x half> %[[SEL1]], <vscale x 8 x half> %[[SEL2]]
-; CHECK-NEXT: call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %cmp.i = fcmp olt half %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
-; CHECK-LABEL: @fmin_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x bfloat> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x bfloat> %[[SEL1]], <vscale x 8 x bfloat> %[[SEL2]]
-; CHECK-NEXT: call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %cmp.i = fcmp olt bfloat %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %.sroa.speculated
-}
-
-; FMAX (FAST)
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
-; CHECK-LABEL: @fmax_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
-; CHECK-NEXT: call fast float @llvm.vector.reduce.fmax.nxv8f32(<vscale x 8 x float> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt float %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
-; CHECK-LABEL: @fmax_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x half> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x half> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x half> %[[SEL1]], <vscale x 8 x half> %[[SEL2]]
-; CHECK-NEXT: call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt half %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
-; CHECK-LABEL: @fmax_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x bfloat> %[[SEL1]], <vscale x 8 x bfloat> %[[SEL2]]
-; CHECK-NEXT: call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt bfloat %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %.sroa.speculated
-}
-
-; Reduction cannot be vectorized
-
-; MUL
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
-define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @mul
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <8 x i32>
-; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD1]]
-; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
-; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %mul = mul nsw i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %mul
-}
-
-; Note: This test was added to ensure we always check the legality of reductions (and emit a warning if necessary) before checking for memory dependencies
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
-define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @memory_dependence
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <8 x i32>
-; CHECK: %[[LOAD3:.*]] = load <8 x i32>
-; CHECK: %[[LOAD4:.*]] = load <8 x i32>
-; CHECK: %[[ADD1:.*]] = add nsw <8 x i32> %[[LOAD3]], %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = add nsw <8 x i32> %[[LOAD4]], %[[LOAD2]]
-; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD3]]
-; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD4]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
-; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
- %0 = load i32, ptr %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
- %1 = load i32, ptr %arrayidx1, align 4
- %add = add nsw i32 %1, %0
- %add2 = add nuw nsw i64 %i, 32
- %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
- store i32 %add, ptr %arrayidx3, align 4
- %mul = mul nsw i32 %1, %sum
- %inc = add nuw nsw i64 %i, 1
- %exitcond.not = icmp eq i64 %inc, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %mul
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 4, interleaved count: 2)
-define float @fmuladd(ptr %a, ptr %b, i64 %n) {
-; CHECK-LABEL: @fmuladd(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>
-; CHECK: [[MULADD1:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD2]], <vscale x 4 x float> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <vscale x 4 x float> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
- %1 = load float, ptr %arrayidx2, align 4
- %muladd = tail call reassoc float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret float %muladd
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" {
-; CHECK-LABEL: @fmuladd_f16_zvfh(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x half>
-; CHECK: [[MULADD1:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD2]], <vscale x 8 x half> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <vscale x 8 x half> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
- %1 = load half, ptr %arrayidx2, align 4
- %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret half %muladd
-}
-
-
-; We can't scalably vectorize reductions of f16 with zvfhmin or bf16 with zvfbfmin, so make sure we use fixed-length vectors instead.
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvfhmin" {
-; CHECK-LABEL: @fmuladd_f16_zvfhmin(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <16 x half>
-; CHECK: [[MULADD1:%.*]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
- %1 = load half, ptr %arrayidx2, align 4
- %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret half %muladd
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin" {
-; CHECK-LABEL: @fmuladd_bf16(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>
-; CHECK: [[MULADD1:%.*]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds bfloat, ptr %b, i64 %iv
- %1 = load bfloat, ptr %arrayidx2, align 4
- %muladd = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat %0, bfloat %1, bfloat %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret bfloat %muladd
-}
-
-declare float @llvm.fmuladd.f32(float, float, float)
-
-attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
-attributes #1 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfhmin,+zvfhmin"}
-attributes #2 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfbfmin,+zvfbfmin"}
-
-!0 = distinct !{!0, !1, !2, !3, !4}
-!1 = !{!"llvm.loop.vectorize.width", i32 8}
-!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
-!3 = !{!"llvm.loop.interleave.count", i32 2}
-!4 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index 60f3181..ed50796 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -24,7 +24,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
@@ -32,6 +32,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -88,7 +89,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -96,6 +97,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
@@ -149,7 +151,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -159,6 +161,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP11]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP7]])
; CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
@@ -220,12 +223,13 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
@@ -276,13 +280,14 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: store i64 [[V]], ptr [[B:%.*]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
@@ -395,7 +400,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
@@ -403,6 +408,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 745b8ba..5c6febc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -1,60 +1,57 @@
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S \
-; RUN: < %s | FileCheck %s
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 \
-; RUN: -scalable-vectorization=on -S < %s | FileCheck %s -check-prefix=SCALABLE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt -p loop-vectorize -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
-target triple = "riscv64"
-
-define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) #0 {
-; CHECK-LABEL: @select_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp sge <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
+; CHECK-LABEL: define i32 @select_icmp(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
+; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[COND_LCSSA]]
;
entry:
br label %for.body
@@ -74,56 +71,57 @@ for.end:
ret i32 %cond
}
-define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) #0 {
-; CHECK-LABEL: @select_fcmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[X:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast uge <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_fcmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X:%.*]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
+; CHECK-LABEL: define i32 @select_fcmp(
+; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
+; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[COND_LCSSA]]
;
entry:
br label %for.body
@@ -143,52 +141,55 @@ for.end:
ret i32 %cond
}
-define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 7, i32 3
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_const_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 7, i32 3
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define i32 @select_const_i32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 3, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -208,52 +209,55 @@ exit: ; preds = %for.body
ret i32 %5
}
-define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 %n) #0 {
-; CHECK-LABEL: @select_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %b, i32 %a
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %b, i32 %a
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 %n) {
+; CHECK-LABEL: define i32 @select_i32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i32 [[A:%.*]], i32 [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -273,52 +277,55 @@ exit: ; preds = %for.body
ret i32 %5
}
-define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_i32_from_fcmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast one <4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 2
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_const_i32_from_fcmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 2
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define i32 @select_const_i32_from_fcmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -338,12 +345,24 @@ exit: ; preds = %for.body
ret i32 %5
}
-define float @select_const_f32_from_icmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_f32_from_icmp
-; CHECK-NOT: vector.body
-;
-; SCALABLE-LABEL: @select_const_f32_from_icmp
-; SCALABLE-NOT: vector.body
+define float @select_const_f32_from_icmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define float @select_const_f32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[TMP6:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi fast float [ 3.000000e+00, %[[ENTRY]] ], [ [[TMP5:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 3
+; CHECK-NEXT: [[TMP5]] = select fast i1 [[TMP4]], float [[TMP1]], float 7.000000e+00
+; CHECK-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[TMP6]], [[N]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[TMP5]], %[[FOR_BODY]] ]
+; CHECK-NEXT: ret float [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -363,60 +382,67 @@ exit: ; preds = %for.body
ret float %5
}
-define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i64 %n) #0 {
-; CHECK-LABEL: @pred_select_const_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[SRC1:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], splat (i32 35)
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[SRC2:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP5]], i32 4, <4 x i1> [[TMP4]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
-; CHECK-NEXT: [[TMP9:%.*]] = or <4 x i1> [[VEC_PHI]], [[TMP8]]
-; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP9]], <4 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i64 %n) {
+; CHECK-LABEL: define i32 @pred_select_const_i32_from_icmp(
+; CHECK-SAME: ptr noalias readonly captures(none) [[SRC1:%.*]], ptr noalias readonly captures(none) [[SRC2:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
+; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
+; CHECK-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[PREDPHI]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP12]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @pred_select_const_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC1:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
-; SCALABLE-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[SRC2:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP9]], i32 4, <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> poison)
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
-; SCALABLE-NEXT: [[TMP13:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP12]]
-; SCALABLE-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[VEC_PHI]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
-; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP18]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
+; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
+; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[I_013]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP15]], 2
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP3]], i32 1, i32 [[R_012]]
+; CHECK-NEXT: br label %[[FOR_INC]]
+; CHECK: [[FOR_INC]]:
+; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: [[FOR_END_LOOPEXIT]]:
+; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
;
entry:
br label %for.body
@@ -446,5 +472,3 @@ for.end.loopexit: ; preds = %for.inc
%r.1.lcssa = phi i32 [ %r.1, %for.inc ]
ret i32 %r.1.lcssa
}
-
-attributes #0 = { "target-features"="+f,+v" }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index 9377854..38e7832 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -33,7 +33,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -42,6 +42,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -158,7 +159,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -167,6 +168,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -283,7 +285,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -292,6 +294,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -408,7 +411,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -417,6 +420,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -533,7 +537,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -542,6 +546,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -658,7 +663,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -667,6 +672,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -783,7 +789,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -792,6 +798,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -908,7 +915,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -917,6 +924,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1033,7 +1041,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1042,6 +1050,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1158,7 +1167,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1167,6 +1176,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1283,7 +1293,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1292,6 +1302,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1408,7 +1419,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1417,6 +1428,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1533,7 +1545,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1542,6 +1554,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1661,7 +1674,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1670,6 +1683,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1788,7 +1802,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1797,6 +1811,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1915,7 +1930,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1924,6 +1939,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -2042,7 +2058,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2051,6 +2067,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -2222,7 +2239,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2231,6 +2248,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index f94f62d..f604745 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -38,7 +38,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -49,6 +49,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -186,7 +187,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -197,6 +198,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -334,7 +336,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -345,6 +347,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -482,7 +485,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -493,6 +496,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -626,7 +630,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -635,6 +639,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -754,7 +759,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
@@ -763,6 +768,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -882,7 +888,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -893,6 +899,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1020,7 +1027,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1031,6 +1038,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1158,7 +1166,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1167,6 +1175,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 2253724..ce2b790 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -33,7 +33,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -42,6 +42,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -160,7 +161,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META10:![0-9]+]]
@@ -169,6 +170,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META13:![0-9]+]], !noalias [[META10]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -287,7 +289,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META17:![0-9]+]]
@@ -296,6 +298,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -414,7 +417,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META24:![0-9]+]]
@@ -423,6 +426,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META24]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -541,7 +545,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META31:![0-9]+]]
@@ -550,6 +554,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META34:![0-9]+]], !noalias [[META31]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -668,7 +673,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -677,6 +682,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -795,7 +801,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -804,6 +810,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -922,7 +929,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -931,6 +938,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1049,7 +1057,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1058,6 +1066,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1176,7 +1185,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1185,6 +1194,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[TMP18]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1298,7 +1308,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -1310,6 +1320,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index 48e080c..d02d53b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -36,7 +36,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV1]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -46,6 +46,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP20]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP22]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP22]]
; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -87,7 +88,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -97,6 +98,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -193,7 +195,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -257,7 +259,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -272,6 +274,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[PREDPHI]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI1]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]]
; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -317,7 +320,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -326,6 +329,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -430,7 +434,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -508,7 +512,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP12]]
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP13]], i64 0
@@ -521,6 +525,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP18]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP20]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -568,7 +573,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[ADD:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -581,6 +586,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[ADD]] = add i32 [[TMP17]], [[RDX]]
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP19]], [[EVL_BASED_IV1]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -697,7 +703,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -768,7 +774,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND2:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT7:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -786,6 +792,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP24]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
; IF-EVL-OUTLOOP-NEXT: [[TMP25:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT2]]
; IF-EVL-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -837,7 +844,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -849,6 +856,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP17]] = add i32 [[TMP16]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -973,7 +981,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index 9299866..ae047f5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -24,7 +24,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -36,6 +36,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -144,7 +145,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -156,6 +157,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -263,7 +265,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -275,6 +277,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -382,7 +385,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -394,6 +397,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index f8f3532..987f946 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -32,8 +32,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP25]], %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP12]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -43,6 +43,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -168,8 +169,8 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP32]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP15]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
@@ -180,6 +181,7 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP15]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -325,8 +327,8 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP22:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP23:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP39]], %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP18]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
@@ -339,6 +341,7 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP18]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -639,8 +642,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 2 x i64> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP11]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP7]]
@@ -652,6 +655,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 0eab77d..2aeb1d0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -27,7 +27,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -41,6 +41,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> align 4 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index c719912..3e23df7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -26,16 +26,17 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -75,7 +76,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -129,20 +130,20 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = mul i32 [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = mul i32 [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -179,9 +180,9 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = mul i32 [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = mul i32 [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -240,16 +241,17 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = or i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -289,7 +291,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = or i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = or i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -347,16 +349,17 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = and i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -396,7 +399,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = and i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = and i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -454,16 +457,17 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = xor i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -503,7 +507,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = xor i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = xor i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -556,21 +560,22 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -666,21 +671,22 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -776,21 +782,22 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -886,21 +893,22 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1001,16 +1009,17 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = fadd reassoc float [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1050,7 +1059,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -1104,20 +1113,20 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -1154,9 +1163,9 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -1210,22 +1219,23 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1322,22 +1332,23 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1435,19 +1446,19 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1543,19 +1554,19 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1655,19 +1666,20 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = fmul reassoc <vscale x 4 x float> [[VP_OP_LOAD]], [[VP_OP_LOAD1]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18]] = fadd reassoc float [[TMP17]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1712,7 +1724,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
; NO-VP-NEXT: [[TMP11:%.*]] = fmul reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP12:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP11]])
-; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[TMP12]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[VEC_PHI]], [[TMP12]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1774,16 +1786,17 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
@@ -1890,16 +1903,17 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 2678989..1f7c518 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -28,7 +28,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP13]]
@@ -43,6 +43,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP26]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index 03bedde..be6ae1d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -44,7 +44,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP10]], [[ENTRY]] ], [ [[TMP19:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -52,6 +52,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP21]]
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -100,7 +101,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP13]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META0:![0-9]+]]
@@ -108,6 +109,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP13]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -214,7 +216,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index 7082c12d..d474a03 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -23,13 +23,14 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP_LOAD]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i32 [[TMP12]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP11]], [[TMP12]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 3b3b798..06c6bfe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -27,7 +27,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[UMAX]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[UMAX]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -35,6 +35,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[UMAX]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -95,7 +96,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -103,6 +104,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -163,7 +165,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC_ADD]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC_ADD]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -171,6 +173,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP5]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC_ADD]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index b6f2a50..5f407fc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -23,7 +23,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -34,6 +34,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP19]], <vscale x 4 x i1> [[TMP17]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP21]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 1a6e60d8..59d1370 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -26,13 +26,14 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP14]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index ba2ee84f..2d5718b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -25,7 +25,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -33,6 +33,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -242,7 +243,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -250,6 +251,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
@@ -352,7 +354,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -360,6 +362,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
@@ -462,7 +465,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -470,6 +473,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
@@ -573,7 +577,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -582,6 +586,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
@@ -690,7 +695,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -699,6 +704,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
@@ -807,7 +813,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -816,6 +822,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
@@ -924,7 +931,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -933,6 +940,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
@@ -1040,7 +1048,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1048,6 +1056,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[VP_OP]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
@@ -1258,7 +1267,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1267,6 +1276,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
@@ -1375,7 +1385,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1384,6 +1394,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
@@ -1707,7 +1718,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1717,6 +1728,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP17]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP16]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
@@ -1826,7 +1838,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1834,6 +1846,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
@@ -1942,7 +1955,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1950,6 +1963,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 297a410..e2db28d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -23,7 +23,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -1
@@ -47,6 +47,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -164,7 +165,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[OFFSET_IDX3:%.*]] = trunc i64 [[EVL_BASED_IV]] to i32
@@ -194,6 +195,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> [[VP_REVERSE_MASK6]], i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP28:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP28]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
@@ -343,7 +345,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
@@ -377,6 +379,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_REVERSE2]], ptr align 1 [[TMP26]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP27]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 47f1cfb..1c78b25 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -26,7 +26,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -35,6 +35,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -356,7 +357,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -365,6 +366,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
@@ -461,7 +463,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 3002, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 3002, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp ult i64 [[AVL]], 1024
; IF-EVL-NEXT: [[SAFE_AVL:%.*]] = select i1 [[TMP9]], i64 [[AVL]], i64 1024
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[SAFE_AVL]], i32 1, i1 true)
@@ -472,6 +474,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP5]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 3002
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 4ce2da7..687a2e7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -26,7 +26,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[SPEC_SELECT]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = sub nuw nsw i64 1, [[OFFSET_IDX]]
@@ -41,6 +41,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_REVERSE]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index c21847f..24649729 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -166,10 +166,11 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> align 1 [[BROADCAST_SPLAT2]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index b40d980..dfdc893 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -22,11 +22,12 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> zeroinitializer, <vscale x 8 x ptr> align 1 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index 46ecb19..6476373 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -33,7 +33,7 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META0:![0-9]+]]
@@ -47,6 +47,7 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 41b9636..568aa95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -99,7 +99,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -108,6 +108,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -410,7 +411,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -428,9 +429,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; TF-SCALABLE-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -569,7 +571,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -578,6 +580,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -707,13 +710,14 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -865,7 +869,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP13]]
@@ -876,6 +880,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
@@ -1041,7 +1046,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]]
@@ -1053,6 +1058,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
@@ -1194,13 +1200,14 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 65fc18a..85116fe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -23,7 +23,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -34,6 +34,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP20]]
; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
index 7f1066c..c058789 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
@@ -20,7 +20,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -35,6 +35,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -76,7 +77,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -91,6 +92,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -132,7 +134,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -147,6 +149,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -188,7 +191,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -203,6 +206,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -244,7 +248,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -256,6 +260,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTLZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -295,7 +300,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -307,6 +312,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTTZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -346,7 +352,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -360,6 +366,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -401,7 +408,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -415,6 +422,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -456,7 +464,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -468,6 +476,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ABS]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
index c1b656a..8d3fe48 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
@@ -19,7 +19,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -31,6 +31,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -72,7 +73,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -84,6 +85,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ZEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -123,7 +125,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -135,6 +137,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -174,7 +177,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -186,6 +189,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -225,7 +229,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -237,6 +241,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -276,7 +281,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -288,6 +293,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -327,7 +333,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -339,6 +345,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[UITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -378,7 +385,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -390,6 +397,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOSI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -429,7 +437,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -441,6 +449,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOUI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -480,7 +489,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -492,6 +501,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[INTTOPTR]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -532,7 +542,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: EMIT vp<[[INDEX:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[INDEX_EVL:%.+]]> = phi ir<0>, vp<[[INDEX_EVL_NEXT:%.+]]>
; IF-EVL-NEXT: ir<[[IV:%.+]]> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[EVL]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<[[N]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[INDEX_EVL]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-GEP Inv[Var] ir<[[GEP:%.+]]> = getelementptr inbounds ir<%b>, ir<[[IV]]>
@@ -542,6 +552,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[VECTOR_PTR]]>, ir<[[PTRTOINT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[ZEXT:%.+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[INDEX_EVL_NEXT]]> = add vp<[[ZEXT]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[ZEXT]]>
; IF-EVL-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDEX]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
index 9900602..ab4bb90 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
@@ -26,8 +26,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<[[FOR_PHI:%.+]]> = phi ir<33>, ir<[[LD:%.+]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%TC>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[PREV_EVL:%.+]]> = phi [ vp<[[VF32]]>, vector.ph ], [ vp<[[EVL:%.+]]>, vector.body ]
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%TC>, vp<[[EVL_PHI]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds nuw ir<%A>, vp<[[ST]]
@@ -40,6 +40,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
index 1c9554d..dff4971 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
@@ -40,7 +40,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-OUTLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-OUTLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_SELECT:%.+]]>
-; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-OUTLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-OUTLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -50,6 +50,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: WIDEN-INTRINSIC vp<[[RDX_SELECT]]> = call llvm.vp.merge(ir<true>, ir<[[ADD]]>, ir<[[RDX_PHI]]>, vp<[[EVL]]>)
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-OUTLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-OUTLOOP-NEXT: No successors
@@ -79,7 +80,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-INLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-INLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, ir<[[RDX_NEXT:%.+]]>
-; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-INLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-INLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -88,6 +89,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + vp.reduce.add (ir<[[LD1]]>, vp<[[EVL]]>)
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-INLOOP-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
index 42a846a..b3a611e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
@@ -24,7 +24,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -39,6 +39,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
index e89f41b..97b5210 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
@@ -142,40 +142,40 @@ define void @fp_iv_loop2(ptr noalias nocapture %A, i32 %N) {
; AUTO_VEC-NEXT: br i1 [[TMP0]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]]
; AUTO_VEC: for.body.preheader.new:
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[ZEXT]], 2147483640
-; AUTO_VEC-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 4
-; AUTO_VEC-NEXT: [[INVARIANT_GEP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
-; AUTO_VEC-NEXT: [[INVARIANT_GEP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 12
-; AUTO_VEC-NEXT: [[INVARIANT_GEP5:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16
-; AUTO_VEC-NEXT: [[INVARIANT_GEP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 20
-; AUTO_VEC-NEXT: [[INVARIANT_GEP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 24
-; AUTO_VEC-NEXT: [[INVARIANT_GEP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 28
; AUTO_VEC-NEXT: br label [[FOR_BODY:%.*]]
; AUTO_VEC: for.body:
; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[X_06:%.*]] = phi float [ 1.000000e+00, [[FOR_BODY_PREHEADER_NEW]] ], [ [[CONV1_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_7:%.*]], [[FOR_BODY]] ]
-; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDVARS_IV]]
; AUTO_VEC-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; AUTO_VEC-NEXT: [[CONV1:%.*]] = fadd float [[X_06]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; AUTO_VEC-NEXT: store float [[CONV1]], ptr [[ARRAYIDX_1]], align 4
; AUTO_VEC-NEXT: [[CONV1_1:%.*]] = fadd float [[CONV1]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP1]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 8
; AUTO_VEC-NEXT: store float [[CONV1_1]], ptr [[ARRAYIDX_2]], align 4
; AUTO_VEC-NEXT: [[CONV1_2:%.*]] = fadd float [[CONV1_1]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP3]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP3]], i64 12
; AUTO_VEC-NEXT: store float [[CONV1_2]], ptr [[ARRAYIDX_3]], align 4
; AUTO_VEC-NEXT: [[CONV1_3:%.*]] = fadd float [[CONV1_2]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP5]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
; AUTO_VEC-NEXT: store float [[CONV1_3]], ptr [[ARRAYIDX_4]], align 4
; AUTO_VEC-NEXT: [[CONV1_4:%.*]] = fadd float [[CONV1_3]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP7]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 20
; AUTO_VEC-NEXT: store float [[CONV1_4]], ptr [[ARRAYIDX_5]], align 4
; AUTO_VEC-NEXT: [[CONV1_5:%.*]] = fadd float [[CONV1_4]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP9]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 24
; AUTO_VEC-NEXT: store float [[CONV1_5]], ptr [[ARRAYIDX_6]], align 4
; AUTO_VEC-NEXT: [[CONV1_6:%.*]] = fadd float [[CONV1_5]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP11]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 28
; AUTO_VEC-NEXT: store float [[CONV1_6]], ptr [[ARRAYIDX_7]], align 4
; AUTO_VEC-NEXT: [[CONV1_7]] = fadd float [[CONV1_6]], 5.000000e-01
; AUTO_VEC-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
@@ -299,40 +299,40 @@ define double @external_use_without_fast_math(ptr %a, i64 %n) {
; AUTO_VEC-NEXT: br i1 [[TMP0]], label [[FOR_END_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]]
; AUTO_VEC: entry.new:
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[SMAX]], 9223372036854775800
-; AUTO_VEC-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 8
-; AUTO_VEC-NEXT: [[INVARIANT_GEP2:%.*]] = getelementptr i8, ptr [[A]], i64 16
-; AUTO_VEC-NEXT: [[INVARIANT_GEP4:%.*]] = getelementptr i8, ptr [[A]], i64 24
-; AUTO_VEC-NEXT: [[INVARIANT_GEP6:%.*]] = getelementptr i8, ptr [[A]], i64 32
-; AUTO_VEC-NEXT: [[INVARIANT_GEP8:%.*]] = getelementptr i8, ptr [[A]], i64 40
-; AUTO_VEC-NEXT: [[INVARIANT_GEP10:%.*]] = getelementptr i8, ptr [[A]], i64 48
-; AUTO_VEC-NEXT: [[INVARIANT_GEP12:%.*]] = getelementptr i8, ptr [[A]], i64 56
; AUTO_VEC-NEXT: br label [[FOR_BODY:%.*]]
; AUTO_VEC: for.body:
; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY_NEW]] ], [ [[I_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ 0.000000e+00, [[ENTRY_NEW]] ], [ [[J_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], [[FOR_BODY]] ]
-; AUTO_VEC-NEXT: [[T0:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[I]]
; AUTO_VEC-NEXT: store double [[J]], ptr [[T0]], align 8
; AUTO_VEC-NEXT: [[J_NEXT:%.*]] = fadd double [[J]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_1:%.*]] = getelementptr double, ptr [[INVARIANT_GEP]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_1:%.*]] = getelementptr i8, ptr [[TMP1]], i64 8
; AUTO_VEC-NEXT: store double [[J_NEXT]], ptr [[T0_1]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_1:%.*]] = fadd double [[J_NEXT]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_2:%.*]] = getelementptr double, ptr [[INVARIANT_GEP2]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_2:%.*]] = getelementptr i8, ptr [[TMP2]], i64 16
; AUTO_VEC-NEXT: store double [[J_NEXT_1]], ptr [[T0_2]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_2:%.*]] = fadd double [[J_NEXT_1]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_3:%.*]] = getelementptr double, ptr [[INVARIANT_GEP4]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_3:%.*]] = getelementptr i8, ptr [[TMP3]], i64 24
; AUTO_VEC-NEXT: store double [[J_NEXT_2]], ptr [[T0_3]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_3:%.*]] = fadd double [[J_NEXT_2]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_4:%.*]] = getelementptr double, ptr [[INVARIANT_GEP6]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_4:%.*]] = getelementptr i8, ptr [[TMP4]], i64 32
; AUTO_VEC-NEXT: store double [[J_NEXT_3]], ptr [[T0_4]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_4:%.*]] = fadd double [[J_NEXT_3]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_5:%.*]] = getelementptr double, ptr [[INVARIANT_GEP8]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_5:%.*]] = getelementptr i8, ptr [[TMP5]], i64 40
; AUTO_VEC-NEXT: store double [[J_NEXT_4]], ptr [[T0_5]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_5:%.*]] = fadd double [[J_NEXT_4]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_6:%.*]] = getelementptr double, ptr [[INVARIANT_GEP10]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_6:%.*]] = getelementptr i8, ptr [[TMP6]], i64 48
; AUTO_VEC-NEXT: store double [[J_NEXT_5]], ptr [[T0_6]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_6:%.*]] = fadd double [[J_NEXT_5]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_7:%.*]] = getelementptr double, ptr [[INVARIANT_GEP12]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_7:%.*]] = getelementptr i8, ptr [[TMP7]], i64 56
; AUTO_VEC-NEXT: store double [[J_NEXT_6]], ptr [[T0_7]], align 8
; AUTO_VEC-NEXT: [[I_NEXT_7]] = add nuw nsw i64 [[I]], 8
; AUTO_VEC-NEXT: [[J_NEXT_7]] = fadd double [[J_NEXT_6]], 3.000000e+00
diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
index 10d83a4..9c910d7 100644
--- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
@@ -324,56 +324,6 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.exp2.f64(double)
-define void @ldexp_f32i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
-; CHECK-LABEL: @ldexp_f32i32(
-; CHECK: llvm.ldexp.v4f32.v4i32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call float @llvm.ldexp.f32.i32(float %0, i32 %exp)
- %arrayidx2 = getelementptr inbounds float, ptr %x, i32 %iv
- store float %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare float @llvm.ldexp.f32.i32(float, i32)
-
-define void @ldexp_f64i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
-; CHECK-LABEL: @ldexp_f64i32(
-; CHECK: llvm.ldexp.v4f64.v4i32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call double @llvm.ldexp.f64.i32(double %0, i32 %exp)
- %arrayidx2 = getelementptr inbounds double, ptr %x, i32 %iv
- store double %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare double @llvm.ldexp.f64.i32(double, i32)
-
define void @log_f32(i32 %n, ptr %y, ptr %x) {
; CHECK-LABEL: @log_f32(
; CHECK: llvm.log.v4f32
@@ -1026,157 +976,6 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.roundeven.f64(double)
-
-define void @lround_i32f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i32f32(
-; CHECK: llvm.lround.v4i32.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i32 @llvm.lround.i32.f32(float %0)
- %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
- store i32 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i32 @llvm.lround.i32.f32(float)
-
-define void @lround_i32f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i32f64(
-; CHECK: llvm.lround.v4i32.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i32 @llvm.lround.i32.f64(double %0)
- %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
- store i32 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i32 @llvm.lround.i32.f64(double)
-
-define void @lround_i64f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i64f32(
-; CHECK: llvm.lround.v4i64.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i64 @llvm.lround.i64.f32(float %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.lround.i64.f32(float)
-
-define void @lround_i64f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i64f64(
-; CHECK: llvm.lround.v4i64.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i64 @llvm.lround.i64.f64(double %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.lround.i64.f64(double)
-
-define void @llround_i64f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @llround_i64f32(
-; CHECK: llvm.llround.v4i64.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i64 @llvm.llround.i64.f32(float %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.llround.i64.f32(float)
-
-define void @llround_i64f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @llround_i64f64(
-; CHECK: llvm.llround.v4i64.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i64 @llvm.llround.i64.f64(double %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.llround.i64.f64(double)
-
define void @fma_f32(i32 %n, ptr %y, ptr %x, ptr %z, ptr %w) {
; CHECK-LABEL: @fma_f32(
; CHECK: llvm.fma.v4f32
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index a85718d..4810952 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -289,7 +289,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP24:%.*]] = phi <4 x i32> [ [[TMP18]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP23]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP24]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP25]])
-; CHECK-NEXT: [[TMP27]] = and i32 [[TMP26]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP27]] = and i32 [[VEC_PHI]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -420,10 +420,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: [[TMP40:%.*]] = phi <4 x i32> [ [[TMP30]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP38]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index 795605d..755d7e2 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -55,7 +55,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -158,13 +158,13 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = add i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = add i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -256,10 +256,10 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> splat (i32 3), <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-NEXT: [[TMP29]] = add i32 [[TMP28]], [[TMP26]]
+; CHECK-NEXT: [[TMP29]] = add i32 [[TMP26]], [[TMP28]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -363,13 +363,13 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -478,11 +478,11 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP50:%.*]] = phi <4 x i32> [ [[TMP34]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP49]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP40:%.*]] = mul nsw <4 x i32> [[TMP50]], [[TMP39]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -590,10 +590,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -698,10 +698,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = and i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = and i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -807,7 +807,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = or i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = or i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -913,7 +913,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = xor i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = xor i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -1124,10 +1124,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x float> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP38]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP41:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP39]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP44:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
index 12d83eb..20b42c3 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
@@ -24,13 +24,13 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[VEC_PHI2]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[VEC_PHI3]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,21 +96,21 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i64>, ptr [[TMP6]], align 8
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i64>, ptr [[TMP7]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[VEC_PHI]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[VEC_PHI2]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[VEC_PHI3]], [[TMP14]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD7]])
-; CHECK-NEXT: [[TMP17]] = add i64 [[TMP16]], [[TMP9]]
+; CHECK-NEXT: [[TMP17]] = add i64 [[TMP9]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD8]])
-; CHECK-NEXT: [[TMP19]] = add i64 [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[TMP19]] = add i64 [[TMP11]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP21]] = add i64 [[TMP20]], [[TMP13]]
+; CHECK-NEXT: [[TMP21]] = add i64 [[TMP13]], [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD10]])
-; CHECK-NEXT: [[TMP23]] = add i64 [[TMP22]], [[TMP15]]
+; CHECK-NEXT: [[TMP23]] = add i64 [[TMP15]], [[TMP22]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -332,16 +332,16 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP98:%.*]] = phi <4 x i32> [ [[TMP92]], [[PRED_LOAD_CONTINUE34]] ], [ [[TMP97]], [[PRED_LOAD_IF35]] ]
; CHECK-NEXT: [[TMP99:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP26]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP100:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP99]])
-; CHECK-NEXT: [[TMP101]] = add i32 [[TMP100]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP101]] = add i32 [[VEC_PHI]], [[TMP100]]
; CHECK-NEXT: [[TMP102:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP50]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP103:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP102]])
-; CHECK-NEXT: [[TMP104]] = add i32 [[TMP103]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP104]] = add i32 [[VEC_PHI4]], [[TMP103]]
; CHECK-NEXT: [[TMP105:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP74]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP106:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP105]])
-; CHECK-NEXT: [[TMP107]] = add i32 [[TMP106]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP107]] = add i32 [[VEC_PHI5]], [[TMP106]]
; CHECK-NEXT: [[TMP108:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[TMP98]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP109:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP108]])
-; CHECK-NEXT: [[TMP110]] = add i32 [[TMP109]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP110]] = add i32 [[VEC_PHI6]], [[TMP109]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272
@@ -571,16 +571,16 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP106:%.*]] = phi <4 x i32> [ [[TMP100]], [[PRED_LOAD_CONTINUE36]] ], [ [[TMP105]], [[PRED_LOAD_IF37]] ]
; CHECK-NEXT: [[TMP107:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP34]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP108:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP107]])
-; CHECK-NEXT: [[TMP109]] = mul i32 [[TMP108]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP109]] = mul i32 [[VEC_PHI]], [[TMP108]]
; CHECK-NEXT: [[TMP110:%.*]] = select <4 x i1> [[TMP9]], <4 x i32> [[TMP58]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP111:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP110]])
-; CHECK-NEXT: [[TMP112]] = mul i32 [[TMP111]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP112]] = mul i32 [[VEC_PHI4]], [[TMP111]]
; CHECK-NEXT: [[TMP113:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP82]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP114:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP113]])
-; CHECK-NEXT: [[TMP115]] = mul i32 [[TMP114]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP115]] = mul i32 [[VEC_PHI5]], [[TMP114]]
; CHECK-NEXT: [[TMP116:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP106]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP116]])
-; CHECK-NEXT: [[TMP118]] = mul i32 [[TMP117]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP118]] = mul i32 [[VEC_PHI6]], [[TMP117]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index b302868..0529d84 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -15,7 +15,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -63,11 +63,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -118,7 +118,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[TMP3]] = add i32 [[TMP2]], 12
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -168,11 +168,11 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -226,10 +226,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B1:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP6]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -282,9 +282,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -335,7 +335,7 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -385,9 +385,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = and i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = and i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -438,7 +438,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = or i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = or i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -489,7 +489,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = xor i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = xor i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -589,9 +589,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -826,11 +826,11 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -984,7 +984,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = fadd float [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
@@ -1127,7 +1127,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-NEXT: [[TMP4:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[TMP4]], <4 x float> splat (float -0.000000e+00)
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = fadd float [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
@@ -1210,7 +1210,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
@@ -1294,10 +1294,10 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[VEC_PHI]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
index be0e0d1..5c52b1a 100644
--- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
@@ -18,7 +18,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
index 4612545..f8bda1c 100644
--- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
@@ -389,22 +389,18 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; CHECK-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
; CHECK-NEXT: br label %[[LOOP_1:.*]]
; CHECK: [[LOOP_1]]:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], %[[LOOP_1]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[PTR_IV_1_NEXT:%.*]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr i8, ptr [[PTR_IV_1]], i64 2
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[C]], label %[[LOOP_2_PH:.*]], label %[[LOOP_1]]
; CHECK: [[LOOP_2_PH]]:
-; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT_LCSSA:%.*]] = phi ptr [ [[PTR_IV_1_NEXT]], %[[LOOP_1]] ]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[SRC2]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDVAR_LCSSA]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -2, [[SRC2]]
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_NEXT_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
@@ -426,11 +422,11 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 40, %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_2_PH]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_2:.*]]
; CHECK: [[LOOP_2]]:
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP_2]] ]
-; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
+; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = add i64 [[IV_1]], 1
; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV_2_NEXT]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
index af2b238..efd9f8b 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
@@ -15,6 +15,13 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = and i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -54,6 +61,10 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -97,6 +108,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -351,6 +367,11 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
index 5457e36..81df2f1 100644
--- a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
+++ b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
@@ -19,4 +19,12 @@ GlobalValueMap:
15859245615183425489: # guid("internal")
- Linkage: 7 # internal
Live: true
+ 1062103744896965210: # guid("alias1")
+ - Linkage: 4 # weak
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
+ 2510616090736846890: # guid("alias2")
+ - Linkage: 0 # weak
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
...
diff --git a/llvm/test/Transforms/LowerTypeTests/export-alias.ll b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
index 45b4db6..25d3483 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-alias.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
@@ -1,21 +1,19 @@
; RUN: opt -S %s -passes=lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/exported-funcs.yaml | FileCheck %s
;
-; CHECK: @alias1 = weak alias [8 x i8], ptr @external_addrtaken
-; CHECK: @alias2 = hidden alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias1 = alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias2 = alias [8 x i8], ptr @external_addrtaken
; CHECK-NOT: @alias3 = alias
; CHECK-NOT: @not_present
target triple = "x86_64-unknown-linux"
-!cfi.functions = !{!0, !2, !3}
-!aliases = !{!4, !5, !6}
+!cfi.functions = !{!0, !2, !3, !4}
+!aliases = !{!5, !6}
!0 = !{!"external_addrtaken", i8 0, !1}
!1 = !{i64 0, !"typeid1"}
-!2 = !{!"alias1", i8 1, !1}
-; alias2 not included here, this could happen if the only reference to alias2
-; is in a module compiled without cfi-icall
-!3 = !{!"alias3", i8 1, !1}
-!4 = !{!"alias1", !"external_addrtaken", i8 0, i8 1}
-!5 = !{!"alias2", !"external_addrtaken", i8 1, i8 0}
-!6 = !{!"alias3", !"not_present", i8 0, i8 0}
+!2 = !{!"alias1", i8 0, !1}
+!3 = !{!"alias2", i8 0, !1}
+!4 = !{!"alias3", i8 0, !1}
+!5 = !{!"external_addrtaken", !"alias1", !"alias2"}
+!6 = !{!"not_present", !"alias3"}
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index a13c36f..b932a69 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -41,7 +41,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[SUM_0_LCSSA]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]]
; CHECK-NEXT: br label [[WHILE_END5]]
; CHECK: while.end5:
; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
index c98e7d3..482907d 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
@@ -8,12 +8,12 @@ define void @foo(ptr noalias noundef %0, ptr noalias noundef %1) optsize {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ptr noalias noundef readonly captures(none) [[TMP0:%.*]], ptr noalias noundef writeonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: vector.ph:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -28
; CHECK-NEXT: br label [[TMP4:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[TMP2:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[TMP4]] ]
; CHECK-NEXT: [[TMP3:%.*]] = sub nuw nsw i64 255, [[INDVARS_IV]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[INVARIANT_GEP]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[TMP3]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 -28
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = load <8 x i32>, ptr [[GEP]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_GATHER]], splat (i32 5)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
diff --git a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
index 19fbc1f..4455016 100644
--- a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
@@ -1,3 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -O3 -S < %s | FileCheck %s
+
;; This tests lowering of the implementations of table-based ctz
;; algorithm to the llvm.cttz instruction in the -O3 case.

@@ -13,13 +16,17 @@
;; }
;; Compiled as: clang -O3 test.c -S -emit-llvm -Xclang -disable-llvm-optzns

-; RUN: opt -O3 -S < %s | FileCheck %s
-
-; CHECK: call range(i32 0, 33) i32 @llvm.cttz.i32
-
@ctz1.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 16

-define i32 @ctz1(i32 noundef %x) {
+define i32 @ctz(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
entry:
%x.addr = alloca i32, align 4
store i32 %x, ptr %x.addr, align 4
@@ -35,3 +42,28 @@ entry:
%conv = sext i8 %2 to i32
ret i32 %conv
}
+
+define i32 @ctz_nonarraygep(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz_nonarraygep(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, ptr %x.addr, align 4
+ %0 = load i32, ptr %x.addr, align 4
+ %1 = load i32, ptr %x.addr, align 4
+ %sub = sub i32 0, %1
+ %and = and i32 %0, %sub
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz1.table, i64 %idxprom
+ %2 = load i8, ptr %arrayidx, align 1
+ %conv = sext i8 %2 to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
deleted file mode 100644
index 301e5da..0000000
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
+++ /dev/null
@@ -1,279 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
-
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-
-define void @ldexp_f32i32(ptr %x, ptr %y, i32 %exp) {
-; CHECK-LABEL: @ldexp_f32i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L4]], i32 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L6]], i32 [[EXP]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp)
- %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp)
- %l5 = tail call float @llvm.ldexp.f32.i32(float %l4, i32 %exp)
- %l7 = tail call float @llvm.ldexp.f32.i32(float %l6, i32 %exp)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64i32(ptr %x, ptr %y, i32 %exp) {
-; CHECK-LABEL: @ldexp_f64i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L4]], i32 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L6]], i32 [[EXP]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp)
- %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp)
- %l5 = tail call double @llvm.ldexp.f64.i32(double %l4, i32 %exp)
- %l7 = tail call double @llvm.ldexp.f64.i32(double %l6, i32 %exp)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f32i64(ptr %x, ptr %y, i64 %exp) {
-; CHECK-LABEL: @ldexp_f32i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L0]], i64 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L2]], i64 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i64(float %l0, i64 %exp)
- %l3 = tail call float @llvm.ldexp.f32.i64(float %l2, i64 %exp)
- %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp)
- %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64i64(ptr %x, ptr %y, i64 %exp) {
-; CHECK-LABEL: @ldexp_f64i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L0]], i64 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L2]], i64 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i64(double %l0, i64 %exp)
- %l3 = tail call double @llvm.ldexp.f64.i64(double %l2, i64 %exp)
- %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp)
- %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f32i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
-; CHECK-LABEL: @ldexp_f32i32_i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP32:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP32]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP64:%.*]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP64]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp32)
- %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp32)
- %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp64)
- %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp64)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64_i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
-; CHECK-LABEL: @ldexp_f64_i32_i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP32:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP32]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP64:%.*]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP64]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp32)
- %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp32)
- %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp64)
- %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp64)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-declare float @llvm.ldexp.f32.i32(float, i32)
-declare double @llvm.ldexp.f64.i32(double, i32)
-declare float @llvm.ldexp.f32.i64(float, i64)
-declare double @llvm.ldexp.f64.i64(double, i64)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
deleted file mode 100644
index 07a3fe7..0000000
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
+++ /dev/null
@@ -1,280 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
-
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-
-define void @lround_i32f32(ptr %x, ptr %y, i32 %n) {
-; CHECK-LABEL: @lround_i32f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]])
-; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
-; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
-; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
-; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i32 @llvm.lround.i32.f32(float %l0)
- %l3 = tail call i32 @llvm.lround.i32.f32(float %l2)
- %l5 = tail call i32 @llvm.lround.i32.f32(float %l4)
- %l7 = tail call i32 @llvm.lround.i32.f32(float %l6)
- store i32 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
- store i32 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
- store i32 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
- store i32 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i32f64(ptr %x, ptr %y, i32 %n) {
-; CHECK-LABEL: @lround_i32f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]])
-; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
-; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
-; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
-; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i32 @llvm.lround.i32.f64(double %l0)
- %l3 = tail call i32 @llvm.lround.i32.f64(double %l2)
- %l5 = tail call i32 @llvm.lround.i32.f64(double %l4)
- %l7 = tail call i32 @llvm.lround.i32.f64(double %l6)
- store i32 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
- store i32 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
- store i32 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
- store i32 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i64f32(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @lround_i64f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.lround.i64.f32(float %l0)
- %l3 = tail call i64 @llvm.lround.i64.f32(float %l2)
- %l5 = tail call i64 @llvm.lround.i64.f32(float %l4)
- %l7 = tail call i64 @llvm.lround.i64.f32(float %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i64f64(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @lround_i64f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.lround.i64.f64(double %l0)
- %l3 = tail call i64 @llvm.lround.i64.f64(double %l2)
- %l5 = tail call i64 @llvm.lround.i64.f64(double %l4)
- %l7 = tail call i64 @llvm.lround.i64.f64(double %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @llround_i64f32(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @llround_i64f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.llround.i64.f32(float %l0)
- %l3 = tail call i64 @llvm.llround.i64.f32(float %l2)
- %l5 = tail call i64 @llvm.llround.i64.f32(float %l4)
- %l7 = tail call i64 @llvm.llround.i64.f32(float %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @llround_i64f64(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @llround_i64f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.llround.i64.f64(double %l0)
- %l3 = tail call i64 @llvm.llround.i64.f64(double %l2)
- %l5 = tail call i64 @llvm.llround.i64.f64(double %l4)
- %l7 = tail call i64 @llvm.llround.i64.f64(double %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-declare i32 @llvm.lround.i32.f32(float)
-declare i64 @llvm.lround.i64.f32(float)
-declare i64 @llvm.lround.i64.f64(double)
-declare i64 @llvm.llround.i64.f32(float)
-declare i64 @llvm.llround.i64.f64(double)
diff --git a/llvm/test/Transforms/Scalarizer/intrinsics.ll b/llvm/test/Transforms/Scalarizer/intrinsics.ll
index 070c765..cee44ef 100644
--- a/llvm/test/Transforms/Scalarizer/intrinsics.ll
+++ b/llvm/test/Transforms/Scalarizer/intrinsics.ll
@@ -8,7 +8,6 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
-declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>)

; Ternary fp
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
@@ -33,8 +32,6 @@ declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>)
; Unary fp operand, int return type
declare <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float>)
declare <2 x i32> @llvm.llrint.v2i32.v2f32(<2 x float>)
-declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>)
-declare <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float>)

; Bool return type, overloaded on fp operand type
declare <2 x i1> @llvm.is.fpclass(<2 x float>, i32)
@@ -162,22 +159,6 @@ define <2 x float> @scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 {
ret <2 x float> %powi
}

-define <2 x float> @scalarize_ldexp_v2f32(<2 x float> %x, <2 x i32> %y) #0 {
-; CHECK-LABEL: @scalarize_ldexp_v2f32(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[Y:%.*]] = extractelement <2 x i32> [[Y1:%.*]], i64 0
-; CHECK-NEXT: [[POWI_I0:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I0]], i32 [[Y]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[Y_I1:%.*]] = extractelement <2 x i32> [[Y1]], i64 1
-; CHECK-NEXT: [[POWI_I1:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I1]], i32 [[Y_I1]])
-; CHECK-NEXT: [[POWI_UPTO0:%.*]] = insertelement <2 x float> poison, float [[POWI_I0]], i64 0
-; CHECK-NEXT: [[POWI:%.*]] = insertelement <2 x float> [[POWI_UPTO0]], float [[POWI_I1]], i64 1
-; CHECK-NEXT: ret <2 x float> [[POWI]]
-;
- %powi = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> %y)
- ret <2 x float> %powi
-}
-
define <2 x i32> @scalarize_smul_fix_sat_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: @scalarize_smul_fix_sat_v2i32(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
@@ -262,34 +243,6 @@ define <2 x i32> @scalarize_llrint(<2 x float> %x) #0 {
ret <2 x i32> %rnd
}

-define <2 x i32> @scalarize_lround(<2 x float> %x) #0 {
-; CHECK-LABEL: @scalarize_lround(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I0]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I1]])
-; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
-; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
-; CHECK-NEXT: ret <2 x i32> [[RND]]
-;
- %rnd = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x)
- ret <2 x i32> %rnd
-}
-
-define <2 x i32> @scalarize_llround(<2 x float> %x) #0 {
-; CHECK-LABEL: @scalarize_llround(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I0]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I1]])
-; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
-; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
-; CHECK-NEXT: ret <2 x i32> [[RND]]
-;
- %rnd = call <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float> %x)
- ret <2 x i32> %rnd
-}
-
define <2 x i1> @scalarize_is_fpclass(<2 x float> %x) #0 {
; CHECK-LABEL: @scalarize_is_fpclass(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
index b309682..2fad306c5 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
@@ -47,10 +47,8 @@ define void @testDisjointOrSplits(ptr %p) {
; CHECK-LABEL: define void @testDisjointOrSplits(
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT: [[VAR:%.*]] = tail call i64 @foo()
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[VAR]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 10
-; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[VAR]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[UGLYGEP]], i64 10
; CHECK-NEXT: store i8 0, ptr [[TMP4]], align 1
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
index b0e88ef..a6b38bc 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
@@ -31,11 +31,9 @@ define void @test_A_sub_B_add_ConstantInt(ptr %p) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[REM]] to i64
; CHECK-NEXT: [[SUB22:%.*]] = sub i64 [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[SUB22]], 2
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 2044
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 2044
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[UGLYGEP]], i64 [[TMP4]]
; CHECK-NEXT: store float 1.000000e+00, ptr [[TMP7]], align 4
; CHECK-NEXT: br label [[COND_END]]
; CHECK: cond.end:
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
new file mode 100644
index 0000000..32b7719
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S < %s | FileCheck %s
+
+; Allow jump-threading when values defined in the block are live outside of the block
+; to those destinations in which the values are dead.
+
+define void @testA(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; %value is live outside of block mainB, but jump-threading
+; can still occur to destination mainC, since %value is dead there.
+; Subsequent CFG simplifications will create one if block.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; Use of %value is not in either immediate destination of mainB.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testA_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testB_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
new file mode 100644
index 0000000..6868693
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=3 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_3
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=4 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_4
+
+; Test option -max-jump-threading-live-blocks=<num>
+
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK_LIMIT_3-LABEL: define void @testB(
+; CHECK_LIMIT_3-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_3-NEXT: [[MAINA:.*]]:
+; CHECK_LIMIT_3-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK_LIMIT_3: [[IFA]]:
+; CHECK_LIMIT_3-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINB]]
+; CHECK_LIMIT_3: [[MAINB]]:
+; CHECK_LIMIT_3-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_3: [[IFB]]:
+; CHECK_LIMIT_3-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM1]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM2]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_3: [[IFB_JOIN]]:
+; CHECK_LIMIT_3-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_3-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_3: [[MAINC]]:
+; CHECK_LIMIT_3-NEXT: ret void
+;
+; CHECK_LIMIT_4-LABEL: define void @testB(
+; CHECK_LIMIT_4-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_4-NEXT: [[MAINA:.*:]]
+; CHECK_LIMIT_4-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_4: [[IFA]]:
+; CHECK_LIMIT_4-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_4-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM1]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM2]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_4: [[IFB_JOIN]]:
+; CHECK_LIMIT_4-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_4-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_4: [[MAINC]]:
+; CHECK_LIMIT_4-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; Use of %value is not in either immediate destination of mainB.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ store i128 0, ptr %ptr_arm1
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ store i128 0, ptr %ptr_arm2
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
diff --git a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
index efc04e9..74693c1 100644
--- a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
+++ b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
@@ -7,11 +7,16 @@ define hidden void @Func() !type !0 {
ret void
}
-; CHECK1: !aliases = !{![[A1:[0-9]+]], ![[A2:[0-9]+]], ![[A3:[0-9]+]]}
+; CHECK1: !cfi.functions = !{![[F1:[0-9]+]], ![[F2:[0-9]+]], ![[F3:[0-9]+]], ![[F4:[0-9]+]]}
+; CHECK1: !aliases = !{![[A:[0-9]+]]}
-; CHECK1: ![[A1]] = !{!"Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A2]] = !{!"Hidden_Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A3]] = !{!"Weak_Alias", !"Func", i8 0, i8 1}
+; CHECK1: ![[F1]] = !{!"Func", i8 0, ![[T:[0-9]+]]}
+; CHECK1: ![[T]] = !{i64 0, !"_ZTSFvvE"}
+; CHECK1: ![[F2]] = !{!"Alias", i8 0, ![[T]]}
+; CHECK1: ![[F3]] = !{!"Hidden_Alias", i8 0, ![[T]]}
+; CHECK1: ![[F4]] = !{!"Weak_Alias", i8 0, ![[T]]}
+;
+; CHECK1: ![[A]] = !{!"Func", !"Alias", !"Hidden_Alias", !"Weak_Alias"}
@Alias = hidden alias void (), ptr @Func
@Hidden_Alias = hidden alias void (), ptr @Func
@Weak_Alias = weak alias void (), ptr @Func
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
new file mode 100644
index 0000000..78dd74c
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "SPIRV" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
new file mode 100644
index 0000000..6f4c80d
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
@@ -0,0 +1,889 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=vector-combine -data-layout=E -mtriple=spirv-unknown-vulkan1.3-library %s | FileCheck %s --check-prefix=SPIRV
+
+define void @insert_store(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q, align 16
+ ret void
+}
+
+define void @insert_store_i16_align1(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_i16_align1(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q, align 1
+ ret void
+}
+
+; To verify case when index is out of bounds
+define void @insert_store_outofbounds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_outofbounds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 9
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_vscale(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 3
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case that the index exceeds the minimum number
+; of elements of a scalable vector type.
+define void @insert_store_vscale_exceeds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale_exceeds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 9
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_v9i4(ptr %q, i4 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v9i4(
+; SPIRV-SAME: ptr [[Q:%.*]], i4 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <9 x i4>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <9 x i4> [[TMP0]], i4 [[S]], i32 3
+; SPIRV-NEXT: store <9 x i4> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <9 x i4>, ptr %q
+ %vecins = insertelement <9 x i4> %0, i4 %s, i32 3
+ store <9 x i4> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v4i27(ptr %q, i27 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v4i27(
+; SPIRV-SAME: ptr [[Q:%.*]], i27 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <4 x i27>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i27> [[TMP0]], i27 [[S]], i32 3
+; SPIRV-NEXT: store <4 x i27> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <4 x i27>, ptr %q
+ %vecins = insertelement <4 x i27> %0, i27 %s, i32 3
+ store <4 x i27> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v32i1(ptr %p) {
+; SPIRV-LABEL: define void @insert_store_v32i1(
+; SPIRV-SAME: ptr [[P:%.*]]) {
+; SPIRV-NEXT: [[VEC:%.*]] = load <32 x i1>, ptr [[P]], align 4
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <32 x i1> [[VEC]], i1 true, i64 0
+; SPIRV-NEXT: store <32 x i1> [[INS]], ptr [[P]], align 4
+; SPIRV-NEXT: ret void
+;
+ %vec = load <32 x i1>, ptr %p
+ %ins = insertelement <32 x i1> %vec, i1 true, i64 0
+ store <32 x i1> %ins, ptr %p
+ ret void
+}
+
+define void @insert_store_blk_differ(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_blk_differ(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: br label %[[CONT:.*]]
+; SPIRV: [[CONT]]:
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ br label %cont
+cont:
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case that the index is not a constant, and
+; the vector type is scalable.
+define void @insert_store_vscale_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the alignment here is narrowed to the scalar store size
+define void @insert_store_nonconst_large_alignment(ptr %q, i32 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_large_alignment(
+; SPIRV-SAME: ptr [[Q:%.*]], i32 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <4 x i32>, ptr [[Q]], align 128
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i32> [[I]], i32 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <4 x i32> [[VECINS]], ptr [[Q]], align 128
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %i = load <4 x i32>, ptr %q, align 128
+ %vecins = insertelement <4 x i32> %i, i32 %s, i32 %idx
+ store <4 x i32> %vecins, ptr %q, align 128
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_8(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_8(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 8
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 8
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 8
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_4(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_4(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 4
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 4
+ ret void
+}
+
+define void @insert_store_nonconst_align_larger(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_larger(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 2
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 2
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @maythrow() readnone
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: call void @maythrow()
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ %0 = load <16 x i8>, ptr %q
+ call void @maythrow()
+ call void @llvm.assume(i1 %cmp)
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @llvm.assume(i1)
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX_FROZEN]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = and i32 %idx.frozen, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 31
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 31
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX_FROZEN]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = urem i32 %idx.frozen, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_ptr_strip(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_ptr_strip(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @volatile_update(ptr %q, ptr %p, i8 zeroext %s) {
+; SPIRV-LABEL: define void @volatile_update(
+; SPIRV-SAME: ptr [[Q:%.*]], ptr [[P:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS0:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store volatile <16 x i8> [[VECINS0]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[VECINS1:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[S]], i32 1
+; SPIRV-NEXT: store <16 x i8> [[VECINS1]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins0 = insertelement <16 x i8> %0, i8 %s, i32 3
+ store volatile <16 x i8> %vecins0, ptr %q
+
+ %1 = load volatile <16 x i8>, ptr %p
+ %vecins1 = insertelement <16 x i8> %1, i8 %s, i32 1
+ store <16 x i8> %vecins1, ptr %p
+ ret void
+}
+
+define void @insert_store_addr_differ(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_addr_differ(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %q
+ ret void
+}
+
+; We can't transform if any instr could modify memory in between.
+define void @insert_store_mem_modify(ptr %p, ptr %q, ptr noalias %r, i8 %s, i32 %m) {
+; SPIRV-LABEL: define void @insert_store_mem_modify(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr noalias [[R:%.*]], i8 [[S:%.*]], i32 [[M:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[Q]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[R]], align 16
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[LD3:%.*]] = load <4 x i32>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS3:%.*]] = insertelement <4 x i32> [[LD3]], i32 [[M]], i32 0
+; SPIRV-NEXT: store <4 x i32> [[INS3]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ ; p may alias q
+ %ld = load <16 x i8>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %q
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+
+ ; p never aliases r
+ %ld2 = load <16 x i8>, ptr %q
+ store <16 x i8> zeroinitializer, ptr %r
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %q
+
+ ; p must alias itself
+ %ld3 = load <4 x i32>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %p
+ %ins3 = insertelement <4 x i32> %ld3, i32 %m, i32 0
+ store <4 x i32> %ins3, ptr %p
+
+ ret void
+}
+
+; Check cases when calls may modify memory
+define void @insert_store_with_call(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_with_call(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @maywrite(ptr [[P]])
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: call void @foo()
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @nowrite(ptr [[P]])
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ call void @maywrite(ptr %p)
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+ call void @foo() ; Barrier
+ %ld2 = load <16 x i8>, ptr %p
+ call void @nowrite(ptr %p)
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %p
+ ret void
+}
+
+declare void @foo()
+declare void @maywrite(ptr)
+declare void @nowrite(ptr) readonly
+
+; To verify that if the number of instructions in-between exceeds the limit (default 30),
+; the combine will quit.
+define i32 @insert_store_maximum_scan_instrs(i32 %arg, ptr %arg1, ptr %arg2, i8 zeroext %arg3) {
+; SPIRV-LABEL: define i32 @insert_store_maximum_scan_instrs(
+; SPIRV-SAME: i32 [[ARG:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i8 zeroext [[ARG3:%.*]]) {
+; SPIRV-NEXT: [[BB:.*:]]
+; SPIRV-NEXT: [[I:%.*]] = or i32 [[ARG]], 1
+; SPIRV-NEXT: [[I4:%.*]] = load <16 x i8>, ptr [[ARG2]], align 16
+; SPIRV-NEXT: [[I5:%.*]] = tail call i32 @bar(i32 [[I]], i1 true)
+; SPIRV-NEXT: [[I6:%.*]] = shl i32 [[ARG]], [[I5]]
+; SPIRV-NEXT: [[I7:%.*]] = lshr i32 [[I6]], 26
+; SPIRV-NEXT: [[I8:%.*]] = trunc i32 [[I7]] to i8
+; SPIRV-NEXT: [[I9:%.*]] = and i8 [[I8]], 31
+; SPIRV-NEXT: [[I10:%.*]] = lshr i32 [[I6]], 11
+; SPIRV-NEXT: [[I11:%.*]] = and i32 [[I10]], 32767
+; SPIRV-NEXT: [[I12:%.*]] = zext i8 [[I9]] to i64
+; SPIRV-NEXT: [[I13:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I12]]
+; SPIRV-NEXT: [[I14:%.*]] = load i16, ptr [[I13]], align 2
+; SPIRV-NEXT: [[I15:%.*]] = zext i16 [[I14]] to i32
+; SPIRV-NEXT: [[I16:%.*]] = add nuw nsw i8 [[I9]], 1
+; SPIRV-NEXT: [[I17:%.*]] = zext i8 [[I16]] to i64
+; SPIRV-NEXT: [[I18:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I17]]
+; SPIRV-NEXT: [[I19:%.*]] = load i16, ptr [[I18]], align 2
+; SPIRV-NEXT: [[I20:%.*]] = zext i16 [[I19]] to i32
+; SPIRV-NEXT: [[I21:%.*]] = sub nsw i32 [[I20]], [[I15]]
+; SPIRV-NEXT: [[I22:%.*]] = mul nsw i32 [[I11]], [[I21]]
+; SPIRV-NEXT: [[I23:%.*]] = ashr i32 [[I22]], 15
+; SPIRV-NEXT: [[I24:%.*]] = shl nuw nsw i32 [[I5]], 15
+; SPIRV-NEXT: [[I25:%.*]] = xor i32 [[I24]], 1015808
+; SPIRV-NEXT: [[I26:%.*]] = add nuw nsw i32 [[I25]], [[I15]]
+; SPIRV-NEXT: [[I27:%.*]] = add nsw i32 [[I26]], [[I23]]
+; SPIRV-NEXT: [[I28:%.*]] = sitofp i32 [[ARG]] to double
+; SPIRV-NEXT: [[I29:%.*]] = tail call double @llvm.log2.f64(double [[I28]])
+; SPIRV-NEXT: [[I30:%.*]] = fptosi double [[I29]] to i32
+; SPIRV-NEXT: [[I31:%.*]] = shl nsw i32 [[I30]], 15
+; SPIRV-NEXT: [[I32:%.*]] = or i32 [[I31]], 4
+; SPIRV-NEXT: [[I33:%.*]] = icmp eq i32 [[I27]], [[I32]]
+; SPIRV-NEXT: [[I34:%.*]] = select i1 [[I33]], i32 [[ARG]], i32 [[I31]]
+; SPIRV-NEXT: [[I35:%.*]] = lshr i32 [[I34]], 1
+; SPIRV-NEXT: [[I36:%.*]] = insertelement <16 x i8> [[I4]], i8 [[ARG3]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[I36]], ptr [[ARG2]], align 16
+; SPIRV-NEXT: ret i32 [[I35]]
+;
+bb:
+ %i = or i32 %arg, 1
+ %i4 = load <16 x i8>, ptr %arg2, align 16
+ %i5 = tail call i32 @bar(i32 %i, i1 true)
+ %i6 = shl i32 %arg, %i5
+ %i7 = lshr i32 %i6, 26
+ %i8 = trunc i32 %i7 to i8
+ %i9 = and i8 %i8, 31
+ %i10 = lshr i32 %i6, 11
+ %i11 = and i32 %i10, 32767
+ %i12 = zext i8 %i9 to i64
+ %i13 = getelementptr inbounds i16, ptr %arg1, i64 %i12
+ %i14 = load i16, ptr %i13, align 2
+ %i15 = zext i16 %i14 to i32
+ %i16 = add nuw nsw i8 %i9, 1
+ %i17 = zext i8 %i16 to i64
+ %i18 = getelementptr inbounds i16, ptr %arg1, i64 %i17
+ %i19 = load i16, ptr %i18, align 2
+ %i20 = zext i16 %i19 to i32
+ %i21 = sub nsw i32 %i20, %i15
+ %i22 = mul nsw i32 %i11, %i21
+ %i23 = ashr i32 %i22, 15
+ %i24 = shl nuw nsw i32 %i5, 15
+ %i25 = xor i32 %i24, 1015808
+ %i26 = add nuw nsw i32 %i25, %i15
+ %i27 = add nsw i32 %i26, %i23
+ %i28 = sitofp i32 %arg to double
+ %i29 = tail call double @llvm.log2.f64(double %i28)
+ %i30 = fptosi double %i29 to i32
+ %i31 = shl nsw i32 %i30, 15
+ %i32 = or i32 %i31, 4
+ %i33 = icmp eq i32 %i27, %i32
+ %i34 = select i1 %i33, i32 %arg, i32 %i31
+ %i35 = lshr i32 %i34, 1
+ %i36 = insertelement <16 x i8> %i4, i8 %arg3, i32 3
+ store <16 x i8> %i36, ptr %arg2, align 16
+ ret i32 %i35
+}
+
+declare i32 @bar(i32, i1) readonly
+declare double @llvm.log2.f64(double)
+
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 143cc38..915e387 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -278,6 +278,7 @@ tools.extend(
]
)
+
# Find (major, minor) version of ptxas
def ptxas_version(ptxas):
ptxas_cmd = subprocess.Popen([ptxas, "--version"], stdout=subprocess.PIPE)
@@ -451,7 +452,7 @@ if config.link_llvm_dylib:
"%llvmdylib",
"{}/libLLVM{}.{}".format(
config.llvm_shlib_dir, config.llvm_shlib_ext, config.llvm_dylib_version
- )
+ ),
)
)
@@ -582,6 +583,7 @@ def have_ld64_plugin_support():
if have_ld64_plugin_support():
config.available_features.add("ld64_plugin")
+
def host_unwind_supports_jit():
# Do we expect the host machine to support JIT registration of clang's
# default unwind info format for the host (e.g. eh-frames, compact-unwind,
@@ -589,7 +591,7 @@ def host_unwind_supports_jit():
# Linux and the BSDs use DWARF eh-frames and all known unwinders support
# register_frame at minimum.
- if platform.system() in [ "Linux", "FreeBSD", "NetBSD" ]:
+ if platform.system() in ["Linux", "FreeBSD", "NetBSD"]:
return True
# Windows does not support frame info without the ORC runtime.
@@ -601,11 +603,7 @@ def host_unwind_supports_jit():
# compact-unwind only, and JIT'd registration is not available before
# macOS 14.0.
if platform.system() == "Darwin":
-
- assert (
- "arm64" in config.host_triple
- or "x86_64" in config.host_triple
- )
+ assert "arm64" in config.host_triple or "x86_64" in config.host_triple
if "x86_64" in config.host_triple:
return True
@@ -627,6 +625,7 @@ def host_unwind_supports_jit():
return False
+
if host_unwind_supports_jit():
config.available_features.add("host-unwind-supports-jit")
diff --git a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
index f2fe794..db223cd 100644
--- a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
@@ -5,7 +5,13 @@
# RUN: yaml2obj %t/stmt_seq_macho.o.yaml -o %t/stmt_seq_macho.o
# RUN: dsymutil --flat --verify-dwarf=none -oso-prepend-path %t %t/stmt_seq_macho.exe -o %t/stmt_seq_macho.dSYM
# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM > %t/stmt_seq_macho.dSYM.txt
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | FileCheck %s -check-prefix=CHECK_NO_INVALID_OFFSET
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | grep DW_AT_LLVM_stmt_sequence | sort | uniq -d | wc -l | FileCheck %s -check-prefix=CHECK_NO_DUPLICATES
+# CHECK_NO_DUPLICATES: 0
+# CHECK_NO_INVALID_OFFSET-NOT: DW_AT_LLVM_stmt_sequence{{.*}}0xfffffff
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET1:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET2:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET3:(0x[0-9a-f]+)]])
@@ -18,6 +24,9 @@
#--- stmt_seq_macho.cpp
#define ATTRIB extern "C" __attribute__((noinline))
+ATTRIB int function1_copy1(int a) {
+ return ++a;
+}
ATTRIB int function3_copy1(int a) {
int b = a + 3;
@@ -51,6 +60,7 @@ int main() {
sum += function2_copy2(3);
sum += function3_copy2(41);
sum += function2_copy1(11);
+ sum += function1_copy1(42);
length_error e("test");
return sum;
}
@@ -108,9 +118,9 @@ LoadCommands:
cmdsize: 1032
segname: ''
vmaddr: 0
- vmsize: 2793
+ vmsize: 3125
fileoff: 1208
- filesize: 2793
+ filesize: 3125
maxprot: 7
initprot: 7
nsects: 12
@@ -119,18 +129,18 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x0
- size: 128
+ size: 148
offset: 0x4B8
align: 2
- reloff: 0xFA8
- nreloc: 7
+ reloff: 0x10F0
+ nreloc: 8
flags: 0x80000400
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
+ content: 00040011C0035FD600100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA40058052000000947302000B0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
relocations:
- - address: 0x78
+ - address: 0x8C
symbolnum: 4
pcrel: true
length: 2
@@ -138,7 +148,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x60
+ - address: 0x74
symbolnum: 3
pcrel: true
length: 2
@@ -146,7 +156,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x58
+ - address: 0x6C
symbolnum: 1
pcrel: false
length: 2
@@ -154,7 +164,7 @@ LoadCommands:
type: 4
scattered: false
value: 0
- - address: 0x54
+ - address: 0x68
symbolnum: 1
pcrel: true
length: 2
@@ -162,7 +172,7 @@ LoadCommands:
type: 3
scattered: false
value: 0
- - address: 0x4C
+ - address: 0x60
symbolnum: 5
pcrel: true
length: 2
@@ -170,16 +180,24 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x40
- symbolnum: 8
+ - address: 0x54
+ symbolnum: 6
pcrel: true
length: 2
extern: true
type: 2
scattered: false
value: 0
- - address: 0x34
- symbolnum: 6
+ - address: 0x48
+ symbolnum: 9
+ pcrel: true
+ length: 2
+ extern: true
+ type: 2
+ scattered: false
+ value: 0
+ - address: 0x3C
+ symbolnum: 7
pcrel: true
length: 2
extern: true
@@ -188,9 +206,9 @@ LoadCommands:
value: 0
- sectname: __cstring
segname: __TEXT
- addr: 0x80
+ addr: 0x94
size: 5
- offset: 0x538
+ offset: 0x54C
align: 0
reloff: 0x0
nreloc: 0
@@ -201,9 +219,9 @@ LoadCommands:
content: '7465737400'
- sectname: __debug_loc
segname: __DWARF
- addr: 0x85
+ addr: 0x99
size: 412
- offset: 0x53D
+ offset: 0x551
align: 0
reloff: 0x0
nreloc: 0
@@ -211,12 +229,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00000000000000000400000000000000010050040000000000000008000000000000000400A301509F0000000000000000000000000000000000000000000000000400000000000000030070039F0000000000000000000000000000000008000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000030070039F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F000000000000000000000000000000001C0000000000000020000000000000000100500000000000000000000000000000000030000000000000003C00000000000000030011009F3C0000000000000048000000000000000100634800000000000000540000000000000001006400000000000000000000000000000000
+ content: 08000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000008000000000000000C00000000000000030070039F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F0000000000000000000000000000000018000000000000001C00000000000000030070039F0000000000000000000000000000000020000000000000002400000000000000010050240000000000000028000000000000000400A301509F00000000000000000000000000000000240000000000000028000000000000000100500000000000000000000000000000000038000000000000004400000000000000030011009F4400000000000000500000000000000001006350000000000000005C0000000000000001006400000000000000000000000000000000
- sectname: __debug_abbrev
segname: __DWARF
- addr: 0x221
- size: 359
- offset: 0x6D9
+ addr: 0x235
+ size: 372
+ offset: 0x6ED
align: 0
reloff: 0x0
nreloc: 0
@@ -226,18 +244,34 @@ LoadCommands:
reserved3: 0x0
- sectname: __debug_info
segname: __DWARF
- addr: 0x388
- size: 686
- offset: 0x840
+ addr: 0x3A9
+ size: 747
+ offset: 0x861
align: 0
- reloff: 0xFE0
- nreloc: 14
+ reloff: 0x1130
+ nreloc: 16
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0x26A
+ - address: 0x2A7
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x28E
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x253
symbolnum: 1
pcrel: false
length: 3
@@ -245,7 +279,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x251
+ - address: 0x1F5
symbolnum: 1
pcrel: false
length: 3
@@ -253,7 +287,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x216
+ - address: 0x1E1
symbolnum: 1
pcrel: false
length: 3
@@ -261,7 +295,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1B8
+ - address: 0x1CE
symbolnum: 1
pcrel: false
length: 3
@@ -269,7 +303,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1A5
+ - address: 0x1BA
symbolnum: 1
pcrel: false
length: 3
@@ -277,7 +311,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x191
+ - address: 0x1A7
symbolnum: 1
pcrel: false
length: 3
@@ -285,7 +319,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x17E
+ - address: 0x169
symbolnum: 1
pcrel: false
length: 3
@@ -293,7 +327,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x140
+ - address: 0x12D
symbolnum: 1
pcrel: false
length: 3
@@ -301,7 +335,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x104
+ - address: 0xF1
symbolnum: 1
pcrel: false
length: 3
@@ -309,7 +343,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xC8
+ - address: 0xC4
symbolnum: 1
pcrel: false
length: 3
@@ -317,7 +351,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x9B
+ - address: 0x88
symbolnum: 1
pcrel: false
length: 3
@@ -351,9 +385,9 @@ LoadCommands:
value: 0
- sectname: __debug_str
segname: __DWARF
- addr: 0x636
- size: 239
- offset: 0xAEE
+ addr: 0x694
+ size: 400
+ offset: 0xB4C
align: 0
reloff: 0x0
nreloc: 0
@@ -363,9 +397,9 @@ LoadCommands:
reserved3: 0x0
- sectname: __apple_names
segname: __DWARF
- addr: 0x725
- size: 260
- offset: 0xBDD
+ addr: 0x824
+ size: 288
+ offset: 0xCDC
align: 0
reloff: 0x0
nreloc: 0
@@ -373,12 +407,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 485341480100000008000000080000000C000000000000000100000001000600000000000200000005000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF90D9F86F88CB36CF4908311CD1125E5389CB36CF4A08311C522B70536A7F9A7C8000000094000000A4000000B4000000C4000000D4000000E4000000F40000008A0000000200000015020000690200000000000055000000010000009A0000000000000045000000010000005E00000000000000A3000000010000001502000000000000750000000100000003010000000000006500000001000000C700000000000000BB00000001000000690200000000000085000000010000003F01000000000000
+ content: 485341480100000009000000090000000C00000000000000010000000100060000000000FFFFFFFFFFFFFFFF0100000003000000040000000600000007000000080000004A08311CC78E3C8288CB36CF89CB36CFD1125E53522B705390D9F86F6A7F9A7C4908311C8C0000009C000000AC000000BC000000CC000000DC000000EC00000000010000100100000601000001000000F000000000000000D6000000010000005E00000000000000F600000001000000C30000000000000016010000010000002C01000000000000440100000100000052020000000000005C01000001000000A6020000000000002B0100000200000052020000A60200000000000026010000010000006801000000000000E6000000010000008700000000000000
- sectname: __apple_objc
segname: __DWARF
- addr: 0x829
+ addr: 0x944
size: 36
- offset: 0xCE1
+ offset: 0xDFC
align: 0
reloff: 0x0
nreloc: 0
@@ -389,9 +423,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_namespac
segname: __DWARF
- addr: 0x84D
+ addr: 0x968
size: 36
- offset: 0xD05
+ offset: 0xE20
align: 0
reloff: 0x0
nreloc: 0
@@ -402,9 +436,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_types
segname: __DWARF
- addr: 0x871
+ addr: 0x98C
size: 195
- offset: 0xD29
+ offset: 0xE44
align: 0
reloff: 0x0
nreloc: 0
@@ -412,21 +446,29 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B00000009700000001000000EA010000130000000000008A00000001000000C80100001300000000000031000000010000005700000024000000000000D300000001000000A1020000240000000000002C000000010000005000000024000000000000
+ content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B0000000380100000100000027020000130000000000002B010000010000000502000013000000000000C20000000100000057000000240000000000007401000001000000DE02000024000000000000BD000000010000005000000024000000000000
- sectname: __debug_frame
segname: __DWARF
- addr: 0x938
- size: 208
- offset: 0xDF0
+ addr: 0xA50
+ size: 232
+ offset: 0xF08
align: 3
- reloff: 0x1050
- nreloc: 7
+ reloff: 0x11B0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000240000000000000020000000000000005800000000000000500C1D109E019D02930394040000000014000000000000007800000000000000040000000000000014000000000000007C000000000000000400000000000000
+ content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000140000000000000020000000000000000800000000000000240000000000000028000000000000006400000000000000500C1D109E019D02930394040000000014000000000000008C000000000000000400000000000000140000000000000090000000000000000400000000000000
relocations:
+ - address: 0xD8
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
- address: 0xC0
symbolnum: 1
pcrel: false
@@ -435,7 +477,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xA8
+ - address: 0x98
symbolnum: 1
pcrel: false
length: 3
@@ -485,18 +527,26 @@ LoadCommands:
value: 0
- sectname: __debug_line
segname: __DWARF
- addr: 0xA08
- size: 225
- offset: 0xEC0
+ addr: 0xB38
+ size: 253
+ offset: 0xFF0
align: 0
- reloff: 0x1088
- nreloc: 7
+ reloff: 0x11F0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0xD1
+ - address: 0xED
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0xD9
symbolnum: 1
pcrel: false
length: 3
@@ -504,7 +554,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xBD
+ - address: 0xAA
symbolnum: 1
pcrel: false
length: 3
@@ -512,7 +562,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x92
+ - address: 0x96
symbolnum: 1
pcrel: false
length: 3
@@ -560,21 +610,21 @@ LoadCommands:
ntools: 0
- cmd: LC_LINKER_OPTIMIZATION_HINT
cmdsize: 16
- dataoff: 4288
+ dataoff: 4656
datasize: 8
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 4296
- nsyms: 10
- stroff: 4456
- strsize: 144
+ symoff: 4664
+ nsyms: 11
+ stroff: 4840
+ strsize: 168
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
nlocalsym: 3
iextdefsym: 3
- nextdefsym: 7
- iundefsym: 10
+ nextdefsym: 8
+ iundefsym: 11
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -590,7 +640,7 @@ LoadCommands:
nlocrel: 0
LinkEditData:
NameList:
- - n_strx: 138
+ - n_strx: 155
n_type: 0xE
n_sect: 1
n_desc: 0
@@ -599,47 +649,52 @@ LinkEditData:
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
- - n_strx: 132
+ n_value: 148
+ - n_strx: 149
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
+ n_value: 148
- n_strx: 39
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 120
+ n_value: 140
- n_strx: 14
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 124
+ n_value: 144
+ - n_strx: 132
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 0
- n_strx: 115
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 8
+ n_value: 16
- n_strx: 81
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 24
+ n_value: 32
- n_strx: 98
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 0
+ n_value: 8
- n_strx: 64
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 16
+ n_value: 24
- n_strx: 8
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 32
+ n_value: 40
StringTable:
- ''
- l_.str
@@ -650,16 +705,25 @@ LinkEditData:
- _function2_copy2
- _function3_copy1
- _function2_copy1
+ - _function1_copy1
- ltmp1
- ltmp0
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
DWARF:
debug_str:
- - ''
+ - 'Facebook clang version 19.1.5 (https://git.internal.tfbnw.net/repos/git/rw/osmeta/external/llvm-project b36c9ae1f8f2b39e4aafb9ca4700c608c3036365)'
- stmt_seq_macho.cpp
- '/'
- '/private/tmp/stmt_seq'
- char
- __ARRAY_SIZE_TYPE__
+ - function1_copy1
- function3_copy1
- function2_copy1
- function3_copy2
@@ -786,6 +850,18 @@ DWARF:
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_decl_file
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_decl_line
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ - Code: 0xA
+ Tag: DW_TAG_formal_parameter
+ Children: DW_CHILDREN_no
+ Attributes:
- Attribute: DW_AT_location
Form: DW_FORM_sec_offset
- Attribute: DW_AT_name
@@ -796,7 +872,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xA
+ - Code: 0xB
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -810,7 +886,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xB
+ - Code: 0xC
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -836,7 +912,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0xC
+ - Code: 0xD
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -850,7 +926,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xD
+ - Code: 0xE
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -858,7 +934,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_call_return_pc
Form: DW_FORM_addr
- - Code: 0xE
+ - Code: 0xF
Tag: DW_TAG_call_site_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -866,7 +942,7 @@ DWARF:
Form: DW_FORM_exprloc
- Attribute: DW_AT_call_value
Form: DW_FORM_exprloc
- - Code: 0xF
+ - Code: 0x10
Tag: DW_TAG_structure_type
Children: DW_CHILDREN_yes
Attributes:
@@ -880,7 +956,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_decl_line
Form: DW_FORM_data1
- - Code: 0x10
+ - Code: 0x11
Tag: DW_TAG_inheritance
Children: DW_CHILDREN_no
Attributes:
@@ -888,7 +964,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_data_member_location
Form: DW_FORM_data1
- - Code: 0x11
+ - Code: 0x12
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -906,7 +982,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_explicit
Form: DW_FORM_flag_present
- - Code: 0x12
+ - Code: 0x13
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -914,13 +990,13 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x13
+ - Code: 0x14
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x14
+ - Code: 0x15
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -936,13 +1012,13 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0x15
+ - Code: 0x16
Tag: DW_TAG_pointer_type
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x16
+ - Code: 0x17
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -964,7 +1040,7 @@ DWARF:
Form: DW_FORM_strp
- Attribute: DW_AT_specification
Form: DW_FORM_ref4
- - Code: 0x17
+ - Code: 0x18
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -976,7 +1052,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x18
+ - Code: 0x19
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -990,7 +1066,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x19
+ - Code: 0x1A
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -1001,7 +1077,7 @@ DWARF:
- Attribute: DW_AT_call_pc
Form: DW_FORM_addr
debug_info:
- - Length: 0x2AA
+ - Length: 0x2E7
Version: 4
AbbrevTableID: 0
AbbrOffset: 0x0
@@ -1011,20 +1087,20 @@ DWARF:
Values:
- Value: 0x0
- Value: 0x21
- - Value: 0x1
- - Value: 0x14
+ - Value: 0x92
+ - Value: 0xA5
- Value: 0x0
- - Value: 0x16
+ - Value: 0xA7
- Value: 0x1
- Value: 0x0
- - Value: 0x80
+ - Value: 0x94
- AbbrCode: 0x2
Values:
- Value: 0x3F
- Value: 0x1
- - Value: 0x23
+ - Value: 0x27
- Value: 0x9
- BlockData: [ 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ BlockData: [ 0x3, 0x94, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0 ]
- AbbrCode: 0x3
Values:
@@ -1039,12 +1115,12 @@ DWARF:
- Value: 0x50
- AbbrCode: 0x6
Values:
- - Value: 0x2C
+ - Value: 0xBD
- Value: 0x6
- Value: 0x1
- AbbrCode: 0x7
Values:
- - Value: 0x31
+ - Value: 0xC2
- Value: 0x8
- Value: 0x7
- AbbrCode: 0x8
@@ -1056,285 +1132,318 @@ DWARF:
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x45
+ - Value: 0xD6
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
+ - Value: 0x2
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- AbbrCode: 0x9
Values:
- - Value: 0x0
- - Value: 0xD7
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x2
+ - Value: 0x2DE
+ - AbbrCode: 0x0
+ - AbbrCode: 0x8
+ Values:
+ - Value: 0x8
+ - Value: 0x8
+ - Value: 0x1
+ - Value: 0x4A
+ - Value: 0x1
+ BlockData: [ 0x6F ]
+ - Value: 0x1
+ - Value: 0xE6
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - Value: 0x1
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
- AbbrCode: 0xA
Values:
+ - Value: 0x0
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - AbbrCode: 0xB
+ Values:
- Value: 0x39
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0x4
- - Value: 0x2A1
+ - Value: 0x7
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x8
+ - Value: 0x10
- Value: 0x8
- Value: 0x1
- - Value: 0x4A
+ - Value: 0x60
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x55
+ - Value: 0xF6
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x5E
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x10
+ - Value: 0x18
- Value: 0x8
- Value: 0x1
- - Value: 0x60
+ - Value: 0x78
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x65
+ - Value: 0x106
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
+ - Value: 0xF
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x97
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0xF
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0xD0
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0xD
- - Value: 0x2A1
+ - Value: 0x10
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x18
+ - Value: 0x20
- Value: 0x8
- Value: 0x1
- - Value: 0x78
+ - Value: 0x90
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x75
+ - Value: 0x116
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
+ - Value: 0x14
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0xF5
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0x14
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0x12E
- - Value: 0xDB
+ - Value: 0x17C
- Value: 0x1
- - Value: 0x12
- - Value: 0x2A1
+ - Value: 0x15
+ - Value: 0x2DE
- AbbrCode: 0x0
- - AbbrCode: 0xB
+ - AbbrCode: 0xC
Values:
- - Value: 0x20
- - Value: 0x58
- - Value: 0x8F
+ - Value: 0x28
+ - Value: 0x64
+ - Value: 0xA7
- Value: 0x1
BlockData: [ 0x6D ]
- Value: 0x1
- - Value: 0x85
+ - Value: 0x126
- Value: 0x1
- - Value: 0x1E
- - Value: 0x2A1
+ - Value: 0x21
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0xC
+ - AbbrCode: 0xD
Values:
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- - Value: 0xE2
+ - Value: 0x183
- Value: 0x1
- - Value: 0x23
- - Value: 0x1C8
- - AbbrCode: 0xA
+ - Value: 0x27
+ - Value: 0x205
+ - AbbrCode: 0xB
Values:
- Value: 0x151
- - Value: 0xE4
+ - Value: 0x185
- Value: 0x1
- - Value: 0x1F
- - Value: 0x2A1
- - AbbrCode: 0xD
- Values:
- - Value: 0x103
- - Value: 0x38
+ - Value: 0x22
+ - Value: 0x2DE
- AbbrCode: 0xE
Values:
+ - Value: 0x12C
+ - Value: 0x40
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x33 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0xC7
- - Value: 0x44
- AbbrCode: 0xE
Values:
+ - Value: 0xF0
+ - Value: 0x4C
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x10, 0x29 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0x9A
- - Value: 0x50
- AbbrCode: 0xE
Values:
+ - Value: 0xC3
+ - Value: 0x58
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x3B ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
+ - AbbrCode: 0xE
Values:
- - Value: 0x215
+ - Value: 0x5E
- Value: 0x64
+ - AbbrCode: 0xF
+ Values:
+ - Value: 0x1
+ BlockData: [ 0x50 ]
+ - Value: 0x2
+ BlockData: [ 0x10, 0x2A ]
+ - AbbrCode: 0x0
- AbbrCode: 0xE
Values:
+ - Value: 0x252
+ - Value: 0x78
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- Value: 0x1
- - Value: 0x1A
- - AbbrCode: 0x10
+ - Value: 0x1D
+ - AbbrCode: 0x11
Values:
- - Value: 0x1EA
+ - Value: 0x227
- Value: 0x0
- - AbbrCode: 0x11
+ - AbbrCode: 0x12
Values:
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- - Value: 0x1B
+ - Value: 0x1E
- Value: 0x1
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x210
+ - Value: 0x24D
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- Value: 0x1
- - Value: 0x16
- - AbbrCode: 0x14
+ - Value: 0x19
+ - AbbrCode: 0x15
Values:
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- - Value: 0x17
+ - Value: 0x1A
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x206
+ - Value: 0x243
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1EA
- - AbbrCode: 0x15
+ - Value: 0x227
+ - AbbrCode: 0x16
Values:
- Value: 0x4B
- - AbbrCode: 0x15
- Values:
- - Value: 0x1C8
- AbbrCode: 0x16
Values:
- - Value: 0x78
+ - Value: 0x205
+ - AbbrCode: 0x17
+ Values:
+ - Value: 0x8C
- Value: 0x4
- Value: 0x1
- - Value: 0xB7
+ - Value: 0xD3
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x234
+ - Value: 0x271
- Value: 0x1
- - Value: 0xA3
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x144
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
- - AbbrCode: 0x19
+ - Value: 0x1E
+ - Value: 0x248
+ - AbbrCode: 0x1A
Values:
- - Value: 0x269
+ - Value: 0x2A6
- Value: 0x1
- - Value: 0x78
- - AbbrCode: 0xE
+ - Value: 0x8C
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x3
BlockData: [ 0xA3, 0x1, 0x50 ]
- - AbbrCode: 0xE
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x51 ]
@@ -1342,45 +1451,45 @@ DWARF:
BlockData: [ 0xA3, 0x1, 0x51 ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x16
+ - AbbrCode: 0x17
Values:
- - Value: 0x7C
+ - Value: 0x90
- Value: 0x4
- Value: 0x1
- - Value: 0xCB
+ - Value: 0xE7
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x288
+ - Value: 0x2C5
- Value: 0x1
- - Value: 0xBB
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x15C
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
+ - Value: 0x1E
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x6
Values:
- - Value: 0xD3
+ - Value: 0x174
- Value: 0x5
- Value: 0x4
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1C8
+ - Value: 0x205
- AbbrCode: 0x0
debug_line:
- - Length: 221
+ - Length: 249
Version: 4
PrologueLength: 42
MinInstLength: 1
@@ -1397,17 +1506,17 @@ DWARF:
Length: 0
Opcodes:
- Opcode: DW_LNS_set_column
- Data: 14
+ Data: 10
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 0
- - Opcode: 0x16
+ - Opcode: 0x14
Data: 0
- Opcode: DW_LNS_set_column
- Data: 5
+ Data: 3
- Opcode: DW_LNS_negate_stmt
Data: 0
- Opcode: 0x4A
@@ -1424,7 +1533,7 @@ DWARF:
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 8
- - Opcode: 0x1A
+ - Opcode: 0x19
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
@@ -1445,7 +1554,7 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 16
- Opcode: DW_LNS_advance_line
- SData: 13
+ SData: 11
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1460,7 +1569,7 @@ DWARF:
SubOpcode: DW_LNE_end_sequence
Data: 0
- Opcode: DW_LNS_set_column
- Data: 20
+ Data: 14
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
@@ -1468,24 +1577,47 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 24
- Opcode: DW_LNS_advance_line
- SData: 17
+ SData: 16
Data: 0
- Opcode: DW_LNS_copy
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
- - Opcode: 0x4B
+ - Opcode: DW_LNS_negate_stmt
+ Data: 0
+ - Opcode: 0x4A
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 1
SubOpcode: DW_LNE_end_sequence
Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 20
+ - Opcode: DW_LNS_set_prologue_end
+ Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 32
- Opcode: DW_LNS_advance_line
- SData: 29
+ SData: 20
+ Data: 0
+ - Opcode: DW_LNS_copy
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 5
+ - Opcode: 0x4B
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 1
+ SubOpcode: DW_LNE_end_sequence
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 9
+ SubOpcode: DW_LNE_set_address
+ Data: 40
+ - Opcode: DW_LNS_advance_line
+ SData: 32
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1509,9 +1641,15 @@ DWARF:
Data: 0
- Opcode: 0x4B
Data: 0
+ - Opcode: 0xBB
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 9
+ - Opcode: 0x81
+ Data: 0
- Opcode: DW_LNS_set_column
Data: 18
- - Opcode: 0xBB
+ - Opcode: 0x4C
Data: 0
- Opcode: DW_LNS_set_column
Data: 9
@@ -1534,9 +1672,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 120
+ Data: 140
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1551,9 +1689,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 124
+ Data: 144
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1604,7 +1742,7 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x1000002F0
- size: 112
+ size: 132
offset: 0x2F0
align: 2
reloff: 0x0
@@ -1613,12 +1751,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
+ content: 00040011C0035FD600580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA40058052ECFFFF977302000B610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
- sectname: __cstring
segname: __TEXT
- addr: 0x100000360
+ addr: 0x100000374
size: 5
- offset: 0x360
+ offset: 0x374
align: 0
reloff: 0x0
nreloc: 0
@@ -1631,9 +1769,9 @@ LoadCommands:
cmdsize: 72
segname: __LINKEDIT
vmaddr: 4294983680
- vmsize: 960
+ vmsize: 1040
fileoff: 16384
- filesize: 960
+ filesize: 1040
maxprot: 1
initprot: 1
nsects: 0
@@ -1649,20 +1787,20 @@ LoadCommands:
lazy_bind_off: 0
lazy_bind_size: 0
export_off: 16384
- export_size: 96
+ export_size: 112
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 16488
- nsyms: 22
- stroff: 16840
- strsize: 192
+ symoff: 16504
+ nsyms: 25
+ stroff: 16904
+ strsize: 208
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
- nlocalsym: 17
- iextdefsym: 17
- nextdefsym: 5
- iundefsym: 22
+ nlocalsym: 19
+ iextdefsym: 19
+ nextdefsym: 6
+ iundefsym: 25
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -1683,7 +1821,7 @@ LoadCommands:
ZeroPadBytes: 7
- cmd: LC_UUID
cmdsize: 24
- uuid: 4C4C4480-5555-3144-A138-E5DA50CC68DB
+ uuid: 4C4C443F-5555-3144-A15F-DE084AB2A15B
- cmd: LC_BUILD_VERSION
cmdsize: 32
platform: 1
@@ -1692,22 +1830,22 @@ LoadCommands:
ntools: 1
Tools:
- tool: 4
- version: 1376256
+ version: 1245445
- cmd: LC_MAIN
cmdsize: 24
- entryoff: 768
+ entryoff: 776
stacksize: 0
- cmd: LC_FUNCTION_STARTS
cmdsize: 16
- dataoff: 16480
+ dataoff: 16496
datasize: 8
- cmd: LC_DATA_IN_CODE
cmdsize: 16
- dataoff: 16488
+ dataoff: 16504
datasize: 0
- cmd: LC_CODE_SIGNATURE
cmdsize: 16
- dataoff: 17040
+ dataoff: 17120
datasize: 304
LinkEditData:
ExportTrie:
@@ -1738,7 +1876,7 @@ LinkEditData:
NodeOffset: 47
Name: main
Flags: 0x0
- Address: 0x300
+ Address: 0x308
Other: 0x0
ImportName: ''
- TerminalSize: 0
@@ -1749,8 +1887,15 @@ LinkEditData:
Other: 0x0
ImportName: ''
Children:
+ - TerminalSize: 3
+ NodeOffset: 80
+ Name: 1_copy1
+ Flags: 0x0
+ Address: 0x2F0
+ Other: 0x0
+ ImportName: ''
- TerminalSize: 0
- NodeOffset: 71
+ NodeOffset: 85
Name: 2_copy
Flags: 0x0
Address: 0x0
@@ -1758,52 +1903,52 @@ LinkEditData:
ImportName: ''
Children:
- TerminalSize: 3
- NodeOffset: 79
+ NodeOffset: 93
Name: '1'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 84
+ NodeOffset: 98
Name: '2'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 89
+ NodeOffset: 103
Name: 3_copy2
Flags: 0x0
- Address: 0x2F8
+ Address: 0x300
Other: 0x0
ImportName: ''
NameList:
- - n_strx: 129
+ - n_strx: 146
n_type: 0x64
n_sect: 0
n_desc: 0
n_value: 0
- - n_strx: 170
+ - n_strx: 187
n_type: 0x66
n_sect: 0
n_desc: 1
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968152
+ n_value: 4294968172
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
n_value: 4
- - n_strx: 84
+ - n_strx: 101
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1813,12 +1958,12 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
- n_value: 88
+ n_value: 100
- n_strx: 8
n_type: 0x24
n_sect: 1
@@ -1843,7 +1988,17 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968048
+ n_value: 4294968064
+ - n_strx: 1
+ n_type: 0x24
+ n_sect: 0
+ n_desc: 0
+ n_value: 8
+ - n_strx: 59
+ n_type: 0x24
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1854,21 +2009,21 @@ LinkEditData:
n_sect: 1
n_desc: 0
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968152
- - n_strx: 84
+ n_value: 4294968172
+ - n_strx: 101
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 2
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 8
n_type: 0xF
n_sect: 1
@@ -1883,8 +2038,13 @@ LinkEditData:
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968048
- - n_strx: 109
+ n_value: 4294968064
+ - n_strx: 59
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
+ - n_strx: 126
n_type: 0xF
n_sect: 1
n_desc: 16
@@ -1892,6 +2052,7 @@ LinkEditData:
StringTable:
- ' '
- _main
+ - _function1_copy1
- _function2_copy1
- _function3_copy2
- _function2_copy2
@@ -1904,6 +2065,5 @@ LinkEditData:
- ''
- ''
- ''
- - ''
- FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x358, 0x35C ]
+ FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x308, 0x36C, 0x370 ]
...
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
index f69c0b1..fc530a4 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
index ed679dc..d492076 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
index 14cbfeb..8810ee1 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
index c3ac49e..6943c18 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
new file mode 100755
index 0000000..14cbfeb
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
new file mode 100644
index 0000000..c3ac49e
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
index 1b4db88..4ab8040 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
index e959e76..c6aec8d 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
index 2822f2f..5af6c81 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
index 05deb2e..8958af9 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
index 22c6136..e9ec22c 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
index 364aa1c..3952768 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
index 34db7e7..e50f663 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
index 7a7d3a6..df6fcb1 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
index f7d1723..63eea44 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
index 0920028..b6a733a 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
index 3d30a62..ce534db 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
@@ -7,7 +7,7 @@ We expect 5 MIBs, each with different AccessHistogramValues.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 5
CHECK-NEXT: NumAllocFunctions: 3
@@ -241,4 +241,4 @@ CHECK-NEXT: MinLifetimeAccessDensity: 56000
CHECK-NEXT: MaxLifetimeAccessDensity: 56000
CHECK-NEXT: AccessHistogramSize: 8
CHECK-NEXT: AccessHistogram: {{[0-9]+}}
-CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21 \ No newline at end of file
+CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic.test b/llvm/test/tools/llvm-profdata/memprof-basic.test
index e15df50..81550eb 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic.test
@@ -8,7 +8,7 @@ additional allocations which do not originate from the main binary are pruned.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -96,4 +96,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0 \ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic_v4.test b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
new file mode 100644
index 0000000..79d4fe2
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
@@ -0,0 +1,102 @@
+REQUIRES: x86_64-linux
+
+This is a copy of memprof-basic.test with slight changes to check that we can still read v4 of memprofraw.
+
+Inputs cannot and should not be updated.
+
+RUN: llvm-profdata show --memory %p/Inputs/basic_v4.memprofraw --profiled-binary %p/Inputs/basic_v4.memprofexe -o - | FileCheck %s
+
+We expect 2 MIB entries, 1 each for the malloc calls in the program. Any
+additional allocations which do not originate from the main binary are pruned.
+
+CHECK: MemprofProfile:
+CHECK-NEXT: Summary:
+CHECK-NEXT: Version: 4
+CHECK-NEXT: NumSegments: {{[0-9]+}}
+CHECK-NEXT: NumMibInfo: 2
+CHECK-NEXT: NumAllocFunctions: 1
+CHECK-NEXT: NumStackOffsets: 2
+CHECK-NEXT: Segments:
+CHECK-NEXT: -
+CHECK-NEXT: BuildId: {{[[:xdigit:]]+}}
+CHECK-NEXT: Start: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: End: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: -
+
+CHECK: Records:
+CHECK-NEXT: -
+CHECK-NEXT: FunctionGUID: {{[0-9]+}}
+CHECK-NEXT: AllocSites:
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 1
+CHECK-NEXT: Column: 21
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 4
+CHECK-NEXT: Column: 15
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-inline.test b/llvm/test/tools/llvm-profdata/memprof-inline.test
index 79ce2ad..4a3f620 100644
--- a/llvm/test/tools/llvm-profdata/memprof-inline.test
+++ b/llvm/test/tools/llvm-profdata/memprof-inline.test
@@ -5,7 +5,7 @@ RUN: llvm-profdata show --memory %p/Inputs/inline.memprofraw --profiled-binary %
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 2
diff --git a/llvm/test/tools/llvm-profdata/memprof-multi.test b/llvm/test/tools/llvm-profdata/memprof-multi.test
index 6243982..35f94df 100644
--- a/llvm/test/tools/llvm-profdata/memprof-multi.test
+++ b/llvm/test/tools/llvm-profdata/memprof-multi.test
@@ -7,7 +7,7 @@ We expect 2 MIB entries, 1 each for the malloc calls in the program.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
diff --git a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
index 4ba58e3..2d0346e 100644
--- a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
@@ -7,7 +7,7 @@ We expect 2 different MIBs with histogram values. This test is to make sure we p
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -21,79 +21,79 @@ CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
CHECK-NEXT: -
CHECK: Records:
-CHEC-NEXT FunctionGUID: {{[0-9]+}}
-CHEC-NEXT AllocSites:
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 3
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 5
-CHEC-NEXT MinAccessCount: 5
-CHEC-NEXT MaxAccessCount: 5
-CHEC-NEXT TotalSize: 24
-CHEC-NEXT MinSize: 24
-CHEC-NEXT MaxSize: 24
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 20
-CHEC-NEXT MinAccessDensity: 20
-CHEC-NEXT MaxAccessDensity: 20
-CHEC-NEXT TotalLifetimeAccessDensity: 20000
-CHEC-NEXT MinLifetimeAccessDensity: 20000
-CHEC-NEXT MaxLifetimeAccessDensity: 20000
-CHEC-NEXT AccessHistogramSize: 3
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -1 -2
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 10
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 4
-CHEC-NEXT MinAccessCount: 4
-CHEC-NEXT MaxAccessCount: 4
-CHEC-NEXT TotalSize: 48
-CHEC-NEXT MinSize: 48
-CHEC-NEXT MaxSize: 48
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 8
-CHEC-NEXT MinAccessDensity: 8
-CHEC-NEXT MaxAccessDensity: 8
-CHEC-NEXT TotalLifetimeAccessDensity: 8000
-CHEC-NEXT MinLifetimeAccessDensity: 8000
-CHEC-NEXT MaxLifetimeAccessDensity: 8000
-CHEC-NEXT AccessHistogramSize: 6
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1 \ No newline at end of file
+CHECK-NEXT FunctionGUID: {{[0-9]+}}
+CHECK-NEXT AllocSites:
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 3
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 5
+CHECK-NEXT MinAccessCount: 5
+CHECK-NEXT MaxAccessCount: 5
+CHECK-NEXT TotalSize: 24
+CHECK-NEXT MinSize: 24
+CHECK-NEXT MaxSize: 24
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 20
+CHECK-NEXT MinAccessDensity: 20
+CHECK-NEXT MaxAccessDensity: 20
+CHECK-NEXT TotalLifetimeAccessDensity: 20000
+CHECK-NEXT MinLifetimeAccessDensity: 20000
+CHECK-NEXT MaxLifetimeAccessDensity: 20000
+CHECK-NEXT AccessHistogramSize: 3
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -1 -2
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 10
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 4
+CHECK-NEXT MinAccessCount: 4
+CHECK-NEXT MaxAccessCount: 4
+CHECK-NEXT TotalSize: 48
+CHECK-NEXT MinSize: 48
+CHECK-NEXT MaxSize: 48
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 8
+CHECK-NEXT MinAccessDensity: 8
+CHECK-NEXT MaxAccessDensity: 8
+CHECK-NEXT TotalLifetimeAccessDensity: 8000
+CHECK-NEXT MinLifetimeAccessDensity: 8000
+CHECK-NEXT MaxLifetimeAccessDensity: 8000
+CHECK-NEXT AccessHistogramSize: 6
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1
diff --git a/llvm/test/tools/llvm-profdata/memprof-pic.test b/llvm/test/tools/llvm-profdata/memprof-pic.test
index 78d2c5c..66203ef 100644
--- a/llvm/test/tools/llvm-profdata/memprof-pic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-pic.test
@@ -11,7 +11,7 @@ RUN: llvm-profdata show --memory %p/Inputs/pic.memprofraw --profiled-binary %p/I
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -100,4 +100,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0 \ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/obj2yaml/ELF/eflags.yaml b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
new file mode 100644
index 0000000..da16a62
--- /dev/null
+++ b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
@@ -0,0 +1,31 @@
+## Check how obj2yaml dumps the e_flags field.
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2MSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [ [[FLAGS]] ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_32PLUS " %s -o %t2
+# RUN: obj2yaml %t2 | FileCheck %s --check-prefix=FLAG
+
+# FLAG: --- !ELF
+# FLAG-NEXT: FileHeader:
+# FLAG-NEXT: Class: ELFCLASS64
+# FLAG-NEXT: Data: ELFDATA2MSB
+# FLAG-NEXT: Type: ET_EXEC
+# FLAG-NEXT: Machine: EM_SPARC32PLUS
+# FLAG-NEXT: Flags: [ EF_SPARC_32PLUS ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_HAL_R1 " %s -o %t3
+# RUN: obj2yaml %t3 | FileCheck %s --check-prefix=FLAG2
+
+# FLAG2: --- !ELF
+# FLAG2-NEXT: FileHeader:
+# FLAG2-NEXT: Class: ELFCLASS64
+# FLAG2-NEXT: Data: ELFDATA2MSB
+# FLAG2-NEXT: Type: ET_EXEC
+# FLAG2-NEXT: Machine: EM_SPARC32PLUS
+# FLAG2-NEXT: Flags: [ EF_SPARC_HAL_R1 ]
diff --git a/llvm/test/tools/yaml2obj/file-header-flags.yaml b/llvm/test/tools/yaml2obj/file-header-flags.yaml
new file mode 100644
index 0000000..baa101a
--- /dev/null
+++ b/llvm/test/tools/yaml2obj/file-header-flags.yaml
@@ -0,0 +1,25 @@
+## Test for FileHeader Flags.
+
+## When the FLAGS variable isn't defined, the e_flags value is 0.
+## Otherwise, it's the specified value.
+
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=NO-FLAG
+
+# RUN: yaml2obj %s -o %t -DFLAGS=[EF_SPARC_32PLUS]
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=FLAG
+
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [[FLAGS=<none>]]
+
+# NO-FLAG: Flags [ (0x0)
+# NO-FLAG-NEXT: ]
+
+# FLAG: Flags [ (0x100)
+# FLAG-NEXT: EF_SPARC_32PLUS (0x100)
+# FLAG-NEXT: ]